 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/stack.h>
#include <vm/vm_kern.h>
#include <geom/geom.h>
#include <geom/journal/g_journal.h>

FEATURE(geom_journal, "GEOM journaling support");
/*
 * On-disk journal format:
 *
 * %%%%%%  ******  +------+  +------+       ******  +------+  %%%%%%
 * % JH %  * RH *  | Data |  | Data |  ...  * RH *  | Data |  ...  % JH %  ...
 * %%%%%%  ******  +------+  +------+       ******  +------+  %%%%%%
 */
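/*
 * In the diagram above, JH is a journal header (struct g_journal_header)
 * and RH is a record header (struct g_journal_record_header); each record
 * header describes the data blocks that directly follow it.
 */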
CTASSERT(sizeof(struct g_journal_header) <= 512);
CTASSERT(sizeof(struct g_journal_record_header) <= 512);

static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");

static struct mtx g_journal_cache_mtx;
MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);

const struct g_journal_desc *g_journal_filesystems[] = {
	&g_journal_ufs,
	NULL
};

SYSCTL_DECL(_kern_geom);

int g_journal_debug = 0;
static u_int g_journal_switch_time = 10;
static u_int g_journal_force_switch = 70;
static u_int g_journal_parallel_flushes = 16;
static u_int g_journal_parallel_copies = 16;
static u_int g_journal_accept_immediately = 64;
static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
static u_int g_journal_do_optimize = 1;

static SYSCTL_NODE(_kern_geom, OID_AUTO, journal, CTLFLAG_RW, 0,
    "GEOM_JOURNAL stuff");
SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RWTUN,
    &g_journal_debug, 0, "Debug level");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
    &g_journal_switch_time, 0, "Switch journals every N seconds");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
    &g_journal_force_switch, 0, "Force switch when journal is N% full");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
    &g_journal_parallel_flushes, 0,
    "Number of flush I/O requests to send in parallel");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
    &g_journal_accept_immediately, 0,
    "Number of I/O requests accepted immediately");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
    &g_journal_parallel_copies, 0,
    "Number of copy I/O requests to send in parallel");

static int
g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int entries;
	int error;

	entries = g_journal_record_entries;
	error = sysctl_handle_int(oidp, &entries, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	g_journal_record_entries = entries;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_record_entries_sysctl, "I",
    "Maximum number of entries in one journal record");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
    &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");
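/*
 * Tuning sketch (illustrative): the knobs above are plain sysctls, so the
 * journal can be tuned from userland, e.g.:
 *
 *	# sysctl kern.geom.journal.switch_time=5
 *	# sysctl kern.geom.journal.record_entries=N
 *
 * The first halves the default 10 second switch interval; for the second,
 * N must stay within 1..GJ_RECORD_HEADER_NENTRIES or the handler above
 * returns EINVAL.
 */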
static u_long g_journal_cache_used = 0;
static u_long g_journal_cache_limit = 64 * 1024 * 1024;
static u_int g_journal_cache_divisor = 2;
static u_int g_journal_cache_switch = 90;
static u_int g_journal_cache_misses = 0;
static u_int g_journal_cache_alloc_failures = 0;
static u_long g_journal_cache_low = 0;

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache, CTLFLAG_RW, 0,
    "GEOM_JOURNAL cache");
SYSCTL_ULONG(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
    &g_journal_cache_used, 0, "Number of allocated bytes");

static int
g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_long limit;
	int error;

	limit = g_journal_cache_limit;
	error = sysctl_handle_long(oidp, &limit, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	g_journal_cache_limit = limit;
	g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_RWTUN, NULL, 0, g_journal_cache_limit_sysctl, "I",
    "Maximum number of allocated bytes");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &g_journal_cache_divisor, 0,
    "(kmem_size / kern.geom.journal.cache.divisor) == cache size");
static int
g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int cswitch;
	int error;

	cswitch = g_journal_cache_switch;
	error = sysctl_handle_int(oidp, &cswitch, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (cswitch > 100)
		return (EINVAL);
	g_journal_cache_switch = cswitch;
	g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
	return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_switch_sysctl, "I",
    "Force switch when we hit this percent of cache use");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
    &g_journal_cache_misses, 0, "Number of cache misses");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
    &g_journal_cache_alloc_failures, 0, "Memory allocation failures");

static u_long g_journal_stats_bytes_skipped = 0;
static u_long g_journal_stats_combined_ios = 0;
static u_long g_journal_stats_switches = 0;
static u_long g_journal_stats_wait_for_copy = 0;
static u_long g_journal_stats_journal_full = 0;
static u_long g_journal_stats_low_mem = 0;

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats, CTLFLAG_RW, 0,
    "GEOM_JOURNAL statistics");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
    &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
    &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
    &g_journal_stats_switches, 0, "Number of journal switches");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
    &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
    &g_journal_stats_journal_full, 0,
    "Number of times journal was almost full");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
    &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called");
static g_taste_t g_journal_taste;
static g_ctl_req_t g_journal_config;
static g_dumpconf_t g_journal_dumpconf;
static g_init_t g_journal_init;
static g_fini_t g_journal_fini;

struct g_class g_journal_class = {
	.name = G_JOURNAL_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_journal_taste,
	.ctlreq = g_journal_config,
	.dumpconf = g_journal_dumpconf,
	.init = g_journal_init,
	.fini = g_journal_fini
};

static int g_journal_destroy(struct g_journal_softc *sc);
static void g_journal_metadata_update(struct g_journal_softc *sc);
static void g_journal_start_switcher(struct g_class *mp);
static void g_journal_stop_switcher(void);
static void g_journal_switch_wait(struct g_journal_softc *sc);

#define	GJ_SWITCHER_WORKING	0
#define	GJ_SWITCHER_DIE		1
#define	GJ_SWITCHER_DIED	2

static struct proc *g_journal_switcher_proc = NULL;
static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
static int g_journal_switcher_wokenup = 0;
static int g_journal_sync_requested = 0;

#ifdef GJ_MEMDEBUG
struct meminfo {
	size_t		mi_size;
	struct stack	mi_stack;
};
#endif

/*
 * We use our own malloc/realloc/free functions, so we can collect statistics
 * and force a journal switch when we're running out of cache.
 */
static void *
gj_malloc(size_t size, int flags)
{
	void *p;
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	mtx_lock(&g_journal_cache_mtx);
	if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
	    g_journal_cache_used + size > g_journal_cache_low) {
		GJ_DEBUG(1, "No cache, waking up the switcher.");
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
	}
	if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
	    g_journal_cache_used + size > g_journal_cache_limit) {
		mtx_unlock(&g_journal_cache_mtx);
		g_journal_cache_alloc_failures++;
		return (NULL);
	}
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
#ifndef GJ_MEMDEBUG
	p = malloc(size, M_JOURNAL, flags | M_WAITOK);
#else
	mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
	p = (u_char *)mi + sizeof(*mi);
	mi->mi_size = size;
	stack_save(&mi->mi_stack);
#endif
	return (p);
}

static void
gj_free(void *p, size_t size)
{
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	KASSERT(p != NULL, ("p=NULL"));
	KASSERT(size > 0, ("size=0"));
	mtx_lock(&g_journal_cache_mtx);
	KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
	g_journal_cache_used -= size;
	mtx_unlock(&g_journal_cache_mtx);
#ifdef GJ_MEMDEBUG
	mi = p = (void *)((u_char *)p - sizeof(*mi));
	if (mi->mi_size != size) {
		printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
		    mi->mi_size);
		printf("GJOURNAL: Alloc backtrace:\n");
		stack_print(&mi->mi_stack);
		printf("GJOURNAL: Free backtrace:\n");
		kdb_backtrace();
	}
#endif
	free(p, M_JOURNAL);
}

static void *
gj_realloc(void *p, size_t size, size_t oldsize)
{
	void *np;

#ifndef GJ_MEMDEBUG
	mtx_lock(&g_journal_cache_mtx);
	g_journal_cache_used -= oldsize;
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	np = realloc(p, size, M_JOURNAL, M_WAITOK);
#else
	np = gj_malloc(size, M_WAITOK);
	bcopy(p, np, MIN(oldsize, size));
	gj_free(p, oldsize);
#endif
	return (np);
}
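/*
 * Usage sketch (the pattern used by the callers below): gj_free() must be
 * passed the exact size that was given to gj_malloc(), because the cache
 * accounting in g_journal_cache_used depends on it:
 *
 *	data = gj_malloc(bp->bio_length, M_WAITOK);
 *	...
 *	gj_free(data, bp->bio_length);
 */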
static void
g_journal_check_overflow(struct g_journal_softc *sc)
{
	off_t length, used;

	if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
	    sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
	    (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
	    sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
	    sc->sc_journal_offset < sc->sc_active.jj_offset)) {
		panic("Journal overflow "
		    "(id = %u joffset=%jd active=%jd inactive=%jd)",
		    (unsigned)sc->sc_id,
		    (intmax_t)sc->sc_journal_offset,
		    (intmax_t)sc->sc_active.jj_offset,
		    (intmax_t)sc->sc_inactive.jj_offset);
	}
	if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
		length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
		used = sc->sc_journal_offset - sc->sc_active.jj_offset;
	} else {
		length = sc->sc_jend - sc->sc_active.jj_offset;
		length += sc->sc_inactive.jj_offset - sc->sc_jstart;
		if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
			used = sc->sc_journal_offset - sc->sc_active.jj_offset;
		else {
			used = sc->sc_jend - sc->sc_active.jj_offset;
			used += sc->sc_journal_offset - sc->sc_jstart;
		}
	}

	/* Already woken up? */
	if (g_journal_switcher_wokenup)
		return;
	/*
	 * If the active journal takes more than g_journal_force_switch percent
	 * of free journal space, we force a journal switch.
	 */
	KASSERT(length > 0,
	    ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
	    (intmax_t)length, (intmax_t)used,
	    (intmax_t)sc->sc_active.jj_offset,
	    (intmax_t)sc->sc_inactive.jj_offset,
	    (intmax_t)sc->sc_journal_offset));
	if ((used * 100) / length > g_journal_force_switch) {
		g_journal_stats_journal_full++;
		GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
		    sc->sc_name, (used * 100) / length);
		mtx_lock(&g_journal_cache_mtx);
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
		mtx_unlock(&g_journal_cache_mtx);
	}
}
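/*
 * Worked example (invented numbers): if the space between the active and
 * inactive journals is length = 100 MB and used = 75 MB of it has been
 * written since the last switch, then (75 * 100) / 100 = 75, which exceeds
 * the default g_journal_force_switch of 70, so the switcher is woken up and
 * a journal switch is forced.
 */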
static void
g_journal_orphan(struct g_consumer *cp)
{
	struct g_journal_softc *sc;
	char name[256];
	int error;

	g_topology_assert();
	sc = cp->geom->softc;
	strlcpy(name, cp->provider->name, sizeof(name));
	GJ_DEBUG(0, "Lost provider %s.", name);
	if (sc == NULL)
		return;
	error = g_journal_destroy(sc);
	if (error == 0)
		GJ_DEBUG(0, "Journal %s destroyed.", name);
	else {
		GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
		    "Destroy it manually after last close.", sc->sc_name,
		    error);
	}
}

static int
g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_journal_softc *sc;
	int dcw;

	g_topology_assert();
	GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
	    acr, acw, ace);

	dcw = pp->acw + acw;

	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		return (ENXIO);
	}
	if (pp->acw == 0 && dcw > 0) {
		GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
		sc->sc_flags &= ~GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} /* else if (pp->acw == 0 && dcw > 0 && JEMPTY(sc)) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		sc->sc_flags |= GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} */
	return (0);
}

static void
g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
{

	bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
	data += sizeof(GJ_HEADER_MAGIC);
	le32enc(data, hdr->jh_journal_id);
	data += 4;
	le32enc(data, hdr->jh_journal_next_id);
}

static int
g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
{

	bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
	data += sizeof(hdr->jh_magic);
	if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
		return (EINVAL);
	hdr->jh_journal_id = le32dec(data);
	data += 4;
	hdr->jh_journal_next_id = le32dec(data);
	return (0);
}
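/*
 * Resulting on-disk layout of the journal header (a sketch that simply
 * follows the encode/decode pair above):
 *
 *	offset 0:  jh_magic (GJ_HEADER_MAGIC)
 *	then:      jh_journal_id,      little endian, 4 bytes
 *	then:      jh_journal_next_id, little endian, 4 bytes
 */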
static void
g_journal_flush_cache(struct g_journal_softc *sc)
{
	struct bintime bt;
	int error;

	if (sc->sc_bio_flush == 0)
		return;
	GJ_TIMER_START(1, &bt);
	if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
		error = g_io_flush(sc->sc_jconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_jconsumer->provider->name, error);
	}
	if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
		/*
		 * TODO: This could be called in parallel with the
		 * previous call.
		 */
		error = g_io_flush(sc->sc_dconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_dconsumer->provider->name, error);
	}
	GJ_TIMER_STOP(1, &bt, "Cache flush time");
}

static int
g_journal_write_header(struct g_journal_softc *sc)
{
	struct g_journal_header hdr;
	struct g_consumer *cp;
	u_char *buf;
	int error;

	cp = sc->sc_jconsumer;
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);

	strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
	hdr.jh_journal_id = sc->sc_journal_id;
	hdr.jh_journal_next_id = sc->sc_journal_next_id;
	g_journal_header_encode(&hdr, buf);
	error = g_write_data(cp, sc->sc_journal_offset, buf,
	    cp->provider->sectorsize);
	/* if (error == 0) */
	sc->sc_journal_offset += cp->provider->sectorsize;

	gj_free(buf, cp->provider->sectorsize);
	return (error);
}

/*
 * Every journal record has a header and data following it.
 * The functions below encode the header to little endian before storing it
 * on disk and decode it back to system endianness after reading it.
 */
static void
g_journal_record_header_encode(struct g_journal_record_header *hdr,
    u_char *data)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
	data += sizeof(GJ_RECORD_HEADER_MAGIC);
	le32enc(data, hdr->jrh_journal_id);
	data += 4;
	le16enc(data, hdr->jrh_nentries);
	data += 2;
	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
	data += sizeof(hdr->jrh_sum);
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		le64enc(data, ent->je_joffset);
		data += 8;
		le64enc(data, ent->je_offset);
		data += 8;
		le64enc(data, ent->je_length);
		data += 8;
	}
}

static int
g_journal_record_header_decode(const u_char *data,
    struct g_journal_record_header *hdr)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
	data += sizeof(hdr->jrh_magic);
	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
		return (EINVAL);
	hdr->jrh_journal_id = le32dec(data);
	data += 4;
	hdr->jrh_nentries = le16dec(data);
	data += 2;
	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
	data += sizeof(hdr->jrh_sum);
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		ent->je_joffset = le64dec(data);
		data += 8;
		ent->je_offset = le64dec(data);
		data += 8;
		ent->je_length = le64dec(data);
		data += 8;
	}
	return (0);
}
/*
 * This function reads metadata from a provider (via the given consumer),
 * decodes it to system endianness and verifies its correctness.
 */
static int
g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	/* Metadata is stored in the last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

	/* Decode metadata. */
	error = journal_metadata_decode(buf, md);
	/* Is this a gjournal provider at all? */
	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
		return (EINVAL);
	/*
	 * Are we able to handle this version of metadata?
	 * We only maintain backward compatibility.
	 */
	if (md->md_version > G_JOURNAL_VERSION) {
		GJ_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	/* Is checksum correct? */
	if (error != 0) {
		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}
	return (0);
}

/*
 * The two functions below are responsible for updating metadata.
 * Only metadata on the data provider is updated (we need to update
 * information about the active journal there).
 */
static void
g_journal_metadata_done(struct bio *bp)
{

	/*
	 * There is not much we can do on error except reporting it.
	 */
	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
		    bp->bio_error);
	} else {
		GJ_LOGREQ(2, bp, "Metadata updated.");
	}
	gj_free(bp->bio_data, bp->bio_length);
	g_destroy_bio(bp);
}

static void
g_journal_metadata_update(struct g_journal_softc *sc)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct bio *bp;
	u_char *sector;

	cp = sc->sc_dconsumer;
	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
	md.md_version = G_JOURNAL_VERSION;
	md.md_id = sc->sc_id;
	md.md_type = sc->sc_orig_type;
	md.md_jstart = sc->sc_jstart;
	md.md_jend = sc->sc_jend;
	md.md_joffset = sc->sc_inactive.jj_offset;
	md.md_jid = sc->sc_journal_previous_id;
	md.md_flags = 0;
	if (sc->sc_flags & GJF_DEVICE_CLEAN)
		md.md_flags |= GJ_FLAG_CLEAN;

	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
	else
		bzero(md.md_provider, sizeof(md.md_provider));
	md.md_provsize = cp->provider->mediasize;
	journal_metadata_encode(&md, sector);

	/*
	 * Flush the cache, so we know all data is on disk.
	 * We write here information like "journal is consistent", so we need
	 * to be sure it is. Without BIO_FLUSH here, we could end up in a
	 * situation where the metadata is stored on disk, but not all data.
	 */
	g_journal_flush_cache(sc);

	bp = g_alloc_bio();
	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = sector;
	bp->bio_cmd = BIO_WRITE;
	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
		bp->bio_done = g_journal_metadata_done;
		g_io_request(bp, cp);
	} else {
		bp->bio_done = NULL;
		g_io_request(bp, cp);
		biowait(bp, "gjmdu");
		g_journal_metadata_done(bp);
	}

	/*
	 * Be sure metadata reached the disk.
	 */
	g_journal_flush_cache(sc);
}
/*
 * This is where the I/O request comes from GEOM.
 */
static void
g_journal_start(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_to->geom->softc;
	GJ_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		mtx_lock(&sc->sc_mtx);
		bioq_insert_tail(&sc->sc_regular_queue, bp);
		wakeup(sc);
		mtx_unlock(&sc->sc_mtx);
		return;
	case BIO_GETATTR:
		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
			bp->bio_completed = strlen(bp->bio_to->name) + 1;
			g_io_deliver(bp, 0);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
}

static void
g_journal_std_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_from->geom->softc;
	mtx_lock(&sc->sc_mtx);
	bioq_insert_tail(&sc->sc_back_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_mtx);
}

static struct bio *
g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
    int flags)
{
	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_offset = start;
	bp->bio_joffset = joffset;
	bp->bio_length = end - start;
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = g_journal_std_done;
	if (data == NULL)
		bp->bio_data = NULL;
	else {
		bp->bio_data = gj_malloc(bp->bio_length, flags);
		if (bp->bio_data != NULL)
			bcopy(data, bp->bio_data, bp->bio_length);
	}
	return (bp);
}

#define	g_journal_insert_bio(head, bp, flags)				\
	g_journal_insert((head), (bp)->bio_offset,			\
	    (bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset,	\
	    (bp)->bio_data, flags)
/*
 * The function below does a lot more than just insert a bio into the queue.
 * It keeps the queue sorted by offset and ensures that there is no duplicate
 * data (it combines bios where ranges overlap).
 *
 * The function returns the number of bios inserted (as a bio can be split).
 */
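/*
 * A small worked example (offsets invented for illustration): if the queue
 * holds one bio covering [16384, 32768) and a new write covers [0, 8192),
 * case 2 below applies and the new bio is simply linked in before the
 * current one, so the function returns 1.  If instead the new write covers
 * [8192, 24576), it overlaps the queued bio and case 6 applies: the fresher
 * data replaces the overlapping head of the queued bio, and only the
 * non-overlapping tail [24576, 32768) keeps its old contents.
 */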
static int
g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
    u_char *data, int flags)
{
	struct bio *nbp, *cbp, *pbp;
	off_t cstart, cend;
	u_char *tmpdata;
	int n;

	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
	    joffset);
	n = 0;
	pbp = NULL;
	GJQ_FOREACH(*head, cbp) {
		cstart = cbp->bio_offset;
		cend = cbp->bio_offset + cbp->bio_length;

		if (nstart >= cend) {
			/*
			 * |   current   |           +-------------+
			 * +-------------+           |     bio     |
			 */
			GJ_DEBUG(3, "INSERT(%p): 1", *head);
		} else if (nend <= cstart) {
			/*
			 * +-------------+           |   current   |
			 * |     bio     |           +-------------+
			 */
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			nbp->bio_next = cbp;
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
			    pbp);
			goto end;
		} else if (nstart <= cstart && nend >= cend) {
			/*
			 *     +-------------+        +-------------+
			 *     | current bio |        | current bio |
			 * +---+-------------+---+    +-------------+---+
			 * |   +-------------+   |    +-------------+   |
			 * |        new bio      |    |     new bio     |
			 * +---------------------+    +-----------------+
			 *
			 *     +-------------+    +-------------+
			 *     | current bio |    | current bio |
			 * +---+-------------+    +-------------+
			 * |   +-------------+    +-------------+
			 * |     new bio     |    |   new bio   |
			 * +-----------------+    +-------------+
			 */
			g_journal_stats_bytes_skipped += cbp->bio_length;
			cbp->bio_offset = nstart;
			cbp->bio_joffset = joffset;
			cbp->bio_length = cend - nstart;
			if (cbp->bio_data != NULL) {
				gj_free(cbp->bio_data, cend - cstart);
				cbp->bio_data = NULL;
			}
			if (data != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(data, cbp->bio_data,
					    cbp->bio_length);
				}
				data += cend - nstart;
			}
			joffset += cend - nstart;
			nstart = cend;
			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend >= cend) {
			/*
			 * +-----------------+    +-------------+
			 * |   current bio   |    | current bio |
			 * |   +-------------+    |   +---------+---+
			 * |   |             |    |   |         |   |
			 * +---+-------------+    +---+---------+   |
			 *     |   new bio   |        |   new bio   |
			 *     +-------------+        +-------------+
			 */
			g_journal_stats_bytes_skipped += cend - nstart;
			nbp = g_journal_new_bio(nstart, cend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			if (data != NULL)
				data += cend - nstart;
			joffset += cend - nstart;
			nstart = cend;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend < cend) {
			/*
			 * +---------------------+
			 * |     current bio     |
			 * |   +-------------+   |
			 * |   |             |   |
			 * +---+-------------+---+
			 *     |   new bio   |
			 *     +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			if (cbp->bio_data == NULL)
				tmpdata = NULL;
			else
				tmpdata = cbp->bio_data + nend - cstart;
			nbp = g_journal_new_bio(nend, cend,
			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
			((struct bio *)cbp->bio_next)->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			n += 2;
			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
			goto end;
		} else if (nstart <= cstart && nend < cend) {
			/*
			 * +-----------------+        +-------------+
			 * |   current bio   |        | current bio |
			 * +-------------+   |    +---+---------+   |
			 * |             |   |    |   |         |   |
			 * +-------------+---+    |   +---------+---+
			 * |   new bio   |        |   new bio   |
			 * +-------------+        +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			nbp->bio_next = cbp;
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			cbp->bio_offset = nend;
			cbp->bio_length = cend - nend;
			cbp->bio_joffset += nend - cstart;
			tmpdata = cbp->bio_data;
			if (tmpdata != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(tmpdata + nend - cstart,
					    cbp->bio_data, cbp->bio_length);
				}
				gj_free(tmpdata, cend - cstart);
			}
			n++;
			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
			goto end;
		}
		pbp = cbp;
	}
	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
	nbp->bio_next = NULL;
	if (pbp == NULL)
		*head = nbp;
	else
		pbp->bio_next = nbp;
	n++;
	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
end:
	if (g_journal_debug >= 3) {
		GJQ_FOREACH(*head, cbp) {
			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
			    (intmax_t)cbp->bio_offset,
			    (intmax_t)cbp->bio_length,
			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
		}
		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
	}
	return (n);
}
/*
 * The function below combines neighbouring bios, trying to squeeze as much
 * data as possible into one bio.
 *
 * The function returns the number of bios combined (as a negative value).
 */
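/*
 * Illustrative example: two queued 8 KB bios covering [0, 8192) and
 * [8192, 16384) are adjacent, so g_journal_optimize() below reallocates the
 * first one to 16 KB, copies the second one's payload after the first one's
 * and unlinks the second bio; the return value is then -1, which the callers
 * add to their in-progress counters.
 */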
static int
g_journal_optimize(struct bio *head)
{
	struct bio *cbp, *pbp;
	int n;

	n = 0;
	pbp = NULL;
	GJQ_FOREACH(head, cbp) {
		/* Skip bios which have to be read first. */
		if (cbp->bio_data == NULL) {
			pbp = NULL;
			continue;
		}
		/* There is no previous bio yet. */
		if (pbp == NULL) {
			pbp = cbp;
			continue;
		}
		/* Is this a neighbour bio? */
		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
			/* Be sure that bios queue is sorted. */
			KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
			    ("poffset=%jd plength=%jd coffset=%jd",
			    (intmax_t)pbp->bio_offset,
			    (intmax_t)pbp->bio_length,
			    (intmax_t)cbp->bio_offset));
			pbp = cbp;
			continue;
		}
		/* Be sure we don't end up with too big bio. */
		if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
			pbp = cbp;
			continue;
		}
		/* Ok, we can join bios. */
		GJ_LOGREQ(4, pbp, "Join: ");
		GJ_LOGREQ(4, cbp, "and: ");
		pbp->bio_data = gj_realloc(pbp->bio_data,
		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
		    cbp->bio_length);
		gj_free(cbp->bio_data, cbp->bio_length);
		pbp->bio_length += cbp->bio_length;
		pbp->bio_next = cbp->bio_next;
		g_destroy_bio(cbp);
		cbp = pbp;
		g_journal_stats_combined_ios++;
		n--;
		GJ_LOGREQ(4, pbp, "Got: ");
	}
	return (n);
}
/*
 * TODO: Update comment.
 * These are the functions responsible for copying one portion of data from
 * the journal to the destination provider.
 * The order goes like this:
 * 1. Read the header, which contains information about data blocks
 *    following it.
 * 2. Read the data blocks from the journal.
 * 3. Write the data blocks on the data provider.
 *
 * g_journal_copy_start()
 * g_journal_copy_done() - got finished write request, logs potential errors
 */

/*
 * When there is no data in cache, this function is used to read it.
 */
static void
g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
{
	struct bio *cbp;

	/*
	 * We were short on memory, so data was freed.
	 * In that case we need to read it back from the journal.
	 */
	cbp = g_alloc_bio();
	cbp->bio_cflags = bp->bio_cflags;
	cbp->bio_parent = bp;
	cbp->bio_offset = bp->bio_joffset;
	cbp->bio_length = bp->bio_length;
	cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
	cbp->bio_cmd = BIO_READ;
	cbp->bio_done = g_journal_std_done;
	GJ_LOGREQ(4, cbp, "READ FIRST");
	g_io_request(cbp, sc->sc_jconsumer);
	g_journal_cache_misses++;
}

static void
g_journal_copy_send(struct g_journal_softc *sc)
{
	struct bio *bioq, *bp, *lbp;

	bioq = lbp = NULL;
	mtx_lock(&sc->sc_mtx);
	for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
		if (bp == NULL)
			break;
		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
		sc->sc_copy_in_progress++;
		GJQ_INSERT_AFTER(bioq, bp, lbp);
		lbp = bp;
	}
	mtx_unlock(&sc->sc_mtx);
	if (g_journal_do_optimize)
		sc->sc_copy_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
		bp->bio_cflags = GJ_BIO_COPY;
		if (bp->bio_data == NULL)
			g_journal_read_first(sc, bp);
		else {
			bp->bio_joffset = 0;
			GJ_LOGREQ(4, bp, "SEND");
			g_io_request(bp, sc->sc_dconsumer);
		}
	}
}

static void
g_journal_copy_start(struct g_journal_softc *sc)
{

	/*
	 * Remember in metadata that we're starting to copy journaled data
	 * to the data provider.
	 * In case of power failure, we will copy this data once again on boot.
	 */
	if (!sc->sc_journal_copying) {
		sc->sc_journal_copying = 1;
		GJ_DEBUG(1, "Starting copy of journal.");
		g_journal_metadata_update(sc);
	}
	g_journal_copy_send(sc);
}

/*
 * Data block has been read from the journal provider.
 */
static int
g_journal_copy_read_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	pbp = bp->bio_parent;

	if (bp->bio_error != 0) {
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
		/*
		 * We will not be able to deliver the WRITE request either.
		 */
		gj_free(bp->bio_data, bp->bio_length);
		g_destroy_bio(pbp);
		g_destroy_bio(bp);
		sc->sc_copy_in_progress--;
		return (1);
	}

	pbp->bio_data = bp->bio_data;
	cp = sc->sc_dconsumer;
	g_io_request(pbp, cp);
	GJ_LOGREQ(4, bp, "READ DONE");
	g_destroy_bio(bp);
	return (0);
}

/*
 * Data block has been written to the data provider.
 */
static void
g_journal_copy_write_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	sc->sc_copy_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	GJQ_REMOVE(sc->sc_copy_queue, bp);
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);

	if (sc->sc_copy_in_progress == 0) {
		/*
		 * This was the last write request for this journal.
		 */
		GJ_DEBUG(1, "Data has been copied.");
		sc->sc_journal_copying = 0;
	}
}
static void g_journal_flush_done(struct bio *bp);

/*
 * Flush one record onto the active journal provider.
 */
static void
g_journal_flush(struct g_journal_softc *sc)
{
	struct g_journal_record_header hdr;
	struct g_journal_entry *ent;
	struct g_provider *pp;
	struct bio **bioq;
	struct bio *bp, *fbp, *pbp;
	MD5_CTX ctx;
	off_t joffset;
	u_char *data, hash[16];
	u_int i;

	if (sc->sc_current_count == 0)
		return;

	pp = sc->sc_jprovider;
	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
	joffset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
	    sc->sc_current_count, pp->name, (intmax_t)joffset);

	/*
	 * Store the 'journal id', so we know to which journal this record
	 * belongs.
	 */
	hdr.jrh_journal_id = sc->sc_journal_id;
	/* Could be less than g_journal_record_entries if called due to a timeout. */
	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));

	bioq = &sc->sc_active.jj_queue;
	GJQ_LAST(sc->sc_flush_queue, pbp);

	fbp = g_alloc_bio();
	fbp->bio_parent = NULL;
	fbp->bio_cflags = GJ_BIO_JOURNAL;
	fbp->bio_offset = -1;
	fbp->bio_joffset = joffset;
	fbp->bio_length = pp->sectorsize;
	fbp->bio_cmd = BIO_WRITE;
	fbp->bio_done = g_journal_std_done;
	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
	pbp = fbp;

	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
	joffset += pp->sectorsize;
	sc->sc_flush_count++;
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
		MD5Init(&ctx);

	for (i = 0; i < hdr.jrh_nentries; i++) {
		bp = sc->sc_current_queue;
		KASSERT(bp != NULL, ("NULL bp"));
		GJ_LOGREQ(4, bp, "FLUSHED");
		sc->sc_current_queue = bp->bio_next;
		bp->bio_next = NULL;
		sc->sc_current_count--;

		/* Add to the header. */
		ent = &hdr.jrh_entries[i];
		ent->je_offset = bp->bio_offset;
		ent->je_joffset = joffset;
		ent->je_length = bp->bio_length;

		data = bp->bio_data;
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Update(&ctx, data, ent->je_length);

		bp->bio_cflags = GJ_BIO_JOURNAL;
		bp->bio_offset = ent->je_offset;
		bp->bio_joffset = ent->je_joffset;
		bp->bio_length = ent->je_length;
		bp->bio_data = data;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_done = g_journal_std_done;
		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
		pbp = bp;

		GJ_LOGREQ(4, bp, "FLUSH_OUT");
		joffset += bp->bio_length;
		sc->sc_flush_count++;

		/*
		 * Add the request to the active sc_journal_queue queue.
		 * This is our cache. After a journal switch we don't have to
		 * read the data from the inactive journal, because we keep
		 * it in memory.
		 */
		g_journal_insert(bioq, ent->je_offset,
		    ent->je_offset + ent->je_length, ent->je_joffset, data,
		    M_NOWAIT);
	}

	/*
	 * After all requests, store a valid header.
	 */
	data = gj_malloc(pp->sectorsize, M_WAITOK);
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
		MD5Final(hash, &ctx);
		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
	}
	g_journal_record_header_encode(&hdr, data);
	fbp->bio_data = data;

	sc->sc_journal_offset = joffset;

	g_journal_check_overflow(sc);
}
/*
 * A flush request finished.
 */
static void
g_journal_flush_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;

	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));

	cp = bp->bio_from;
	sc = cp->geom->softc;
	sc->sc_flush_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);
}

static void g_journal_release_delayed(struct g_journal_softc *sc);

static void
g_journal_flush_send(struct g_journal_softc *sc)
{
	struct g_consumer *cp;
	struct bio *bioq, *bp, *lbp;

	cp = sc->sc_jconsumer;
	bioq = lbp = NULL;
	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
		/* Send flush requests to the active journal. */
		bp = GJQ_FIRST(sc->sc_flush_queue);
		if (bp != NULL) {
			GJQ_REMOVE(sc->sc_flush_queue, bp);
			sc->sc_flush_count--;
			bp->bio_offset = bp->bio_joffset;
			bp->bio_joffset = 0;
			sc->sc_flush_in_progress++;
			GJQ_INSERT_AFTER(bioq, bp, lbp);
			lbp = bp;
		}
		/* Try to release delayed requests. */
		g_journal_release_delayed(sc);
		/* If there are no requests to flush, leave. */
		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
			break;
	}
	if (g_journal_do_optimize)
		sc->sc_flush_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJ_LOGREQ(3, bp, "Flush request sent");
		g_io_request(bp, cp);
	}
}
static void
g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
{
	int n;

	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
	sc->sc_current_count += n;
	n = g_journal_optimize(sc->sc_current_queue);
	sc->sc_current_count += n;
	/*
	 * For requests which are added to the current queue we deliver the
	 * response immediately.
	 */
	bp->bio_completed = bp->bio_length;
	g_io_deliver(bp, 0);
	if (sc->sc_current_count >= g_journal_record_entries) {
		/*
		 * Let's flush one record onto the active journal provider.
		 */
		g_journal_flush(sc);
	}
}

static void
g_journal_release_delayed(struct g_journal_softc *sc)
{
	struct bio *bp;

	for (;;) {
		/* The flush queue is full, exit. */
		if (sc->sc_flush_count >= g_journal_accept_immediately)
			return;
		bp = bioq_takefirst(&sc->sc_delayed_queue);
		if (bp == NULL)
			return;
		sc->sc_delayed_count--;
		g_journal_add_current(sc, bp);
	}
}

/*
 * Add an I/O request to the current queue. If we have enough requests for one
 * journal record, we flush them onto the active journal provider.
 */
static void
g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
{

	/*
	 * The flush queue is full, we need to delay the request.
	 */
	if (sc->sc_delayed_count > 0 ||
	    sc->sc_flush_count >= g_journal_accept_immediately) {
		GJ_LOGREQ(4, bp, "DELAYED");
		bioq_insert_tail(&sc->sc_delayed_queue, bp);
		sc->sc_delayed_count++;
		return;
	}

	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
	    ("DELAYED queue not empty."));
	g_journal_add_current(sc, bp);
}

static void g_journal_read_done(struct bio *bp);
/*
 * Try to find the requested data in the cache.
 */
static struct bio *
g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	GJQ_FOREACH(head, bp) {
		if (bp->bio_offset == -1)
			continue;
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend) {
			if (!sorted)
				continue;
			else {
				bp = NULL;
				break;
			}
		}
		if (bp->bio_data == NULL)
			break;
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in cache, deliver happy
			 * response and return.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	return (bp);
}

/*
 * This function is used for collecting data on read.
 * The complexity is because parts of the data can be stored in four different
 * places:
 * - in memory - the data not yet sent to the active journal provider
 * - in the active journal
 * - in the inactive journal
 * - in the data provider
 */
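/*
 * Illustrative note: the lookup below walks these places from the freshest
 * copy to the stalest one (current queue, then the active journal, then the
 * inactive journal, then the data provider), so a read always returns the
 * most recently written data even before it has been copied back to the
 * data provider.
 */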
static void
g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
    off_t oend)
{
	struct bio *bp, *nbp, *head;
	off_t cstart, cend;
	u_int i, sorted = 0;

	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);

	cstart = cend = -1;
	bp = NULL;
	head = NULL;
	for (i = 1; i <= 5; i++) {
		switch (i) {
		case 1:	/* Not-yet-sent data. */
			head = sc->sc_current_queue;
			sorted = 1;
			break;
		case 2:	/* Skip the flush queue, as its bios are also in the active queue. */
			continue;
		case 3:	/* Active journal. */
			head = sc->sc_active.jj_queue;
			sorted = 1;
			break;
		case 4:	/* Inactive journal. */
			/*
			 * XXX: Here could be a race with g_journal_lowmem().
			 */
			head = sc->sc_inactive.jj_queue;
			sorted = 1;
			break;
		case 5:	/* In-flight to the data provider. */
			head = sc->sc_copy_queue;
			sorted = 0;
			break;
		default:
			panic("gjournal %s: i=%d", __func__, i);
		}
		bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
		if (bp == pbp) { /* Got the whole request. */
			GJ_DEBUG(2, "Got the whole request from %u.", i);
			return;
		} else if (bp != NULL) {
			cstart = MAX(ostart, bp->bio_offset);
			cend = MIN(oend, bp->bio_offset + bp->bio_length);
			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
			    i, (intmax_t)cstart, (intmax_t)cend);
			break;
		}
	}
	if (bp != NULL) {
		if (bp->bio_data == NULL) {
			nbp = g_duplicate_bio(pbp);
			nbp->bio_cflags = GJ_BIO_READ;
			nbp->bio_data =
			    pbp->bio_data + cstart - pbp->bio_offset;
			nbp->bio_offset =
			    bp->bio_joffset + cstart - bp->bio_offset;
			nbp->bio_length = cend - cstart;
			nbp->bio_done = g_journal_read_done;
			g_io_request(nbp, sc->sc_jconsumer);
		}
		/*
		 * If we don't have the whole request yet, call g_journal_read()
		 * recursively for the remaining parts.
		 */
		if (ostart < cstart)
			g_journal_read(sc, pbp, ostart, cstart);
		if (cend < oend)
			g_journal_read(sc, pbp, cend, oend);
	} else {
		/*
		 * No data in memory, no data in journal.
		 * It's time to ask the data provider.
		 */
		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
		nbp = g_duplicate_bio(pbp);
		nbp->bio_cflags = GJ_BIO_READ;
		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
		nbp->bio_offset = ostart;
		nbp->bio_length = oend - ostart;
		nbp->bio_done = g_journal_read_done;
		g_io_request(nbp, sc->sc_dconsumer);
		/* We have the whole request, return here. */
		return;
	}
}
/*
 * Function responsible for handling finished READ requests.
 * Actually, g_std_done() could be used here; the only difference is that we
 * also count the completed bytes and deliver the parent bio ourselves once
 * all fragments are in.
 */
static void
g_journal_read_done(struct bio *bp)
{
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_READ,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));

	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	pbp->bio_completed += bp->bio_length;

	if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
	}
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed &&
	    pbp->bio_completed == pbp->bio_length) {
		g_io_deliver(pbp, 0);
	}
}

/*
 * Deactivate the current journal and activate the next one.
 */
static void
g_journal_switch(struct g_journal_softc *sc)
{
	struct g_provider *pp;

	if (JEMPTY(sc)) {
		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
		pp = LIST_FIRST(&sc->sc_geom->provider);
		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
			sc->sc_flags |= GJF_DEVICE_CLEAN;
			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
			g_journal_metadata_update(sc);
		}
	} else {
		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);

		pp = sc->sc_jprovider;

		sc->sc_journal_previous_id = sc->sc_journal_id;

		sc->sc_journal_id = sc->sc_journal_next_id;
		sc->sc_journal_next_id = arc4random();

		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);

		g_journal_write_header(sc);

		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;

		sc->sc_active.jj_offset =
		    sc->sc_journal_offset - pp->sectorsize;
		sc->sc_active.jj_queue = NULL;

		/*
		 * Switch is done, start copying data from the (now) inactive
		 * journal to the data provider.
		 */
		g_journal_copy_start(sc);
	}

	mtx_lock(&sc->sc_mtx);
	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
	mtx_unlock(&sc->sc_mtx);
}

static void
g_journal_initialize(struct g_journal_softc *sc)
{

	sc->sc_journal_id = arc4random();
	sc->sc_journal_next_id = arc4random();
	sc->sc_journal_previous_id = sc->sc_journal_id;
	sc->sc_journal_offset = sc->sc_jstart;
	sc->sc_inactive.jj_offset = sc->sc_jstart;
	g_journal_write_header(sc);
	sc->sc_active.jj_offset = sc->sc_jstart;
}

static void
g_journal_mark_as_dirty(struct g_journal_softc *sc)
{
	const struct g_journal_desc *desc;
	int i;

	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
		desc->jd_dirty(sc->sc_dconsumer);
}

/*
 * This function reads a record header from the given journal.
 * It is very similar to g_read_data(9), but it doesn't allocate memory for
 * the bio and data on every call.
 */
static int
g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
    void *data)
{
	int error;

	g_reset_bio(bp);
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = data;
	g_io_request(bp, cp);
	error = biowait(bp, "gjs_read");
	return (error);
}

/*
 * This function is called when we start the journal device and detect that
 * one of the journals was not fully copied.
 * The purpose of this function is to read all record headers from the journal
 * and place them in the inactive queue, so that we can start the journal
 * synchronization process and the journal provider itself.
 * The design decision was to not synchronize the whole journal here, as that
 * can take too much time. Reading only the headers and delaying the
 * synchronization process until after the journal provider is started should
 * be the best choice.
 */
static void
g_journal_sync(struct g_journal_softc *sc)
{
	struct g_journal_record_header rhdr;
	struct g_journal_entry *ent;
	struct g_journal_header jhdr;
	struct g_consumer *cp;
	struct bio *bp, *fbp, *tbp;
	off_t joffset, offset;
	u_char *buf, sum[16];
	uint32_t id;
	MD5_CTX ctx;
	int error, found, i;

	found = 0;
	fbp = NULL;
	cp = sc->sc_jconsumer;
	bp = g_alloc_bio();
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);

	/*
	 * Read and decode first journal header.
	 */
	error = g_journal_sync_read(cp, bp, offset, buf);
	if (error != 0) {
		GJ_DEBUG(0, "Error while reading journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	error = g_journal_header_decode(buf, &jhdr);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot decode journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	id = sc->sc_journal_id;
	if (jhdr.jh_journal_id != sc->sc_journal_id) {
		GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
		    (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
		goto end;
	}
	offset += cp->provider->sectorsize;
	id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;

	for (;;) {
		/*
		 * If the biggest record won't fit, look for a record header or
		 * journal header from the beginning.
		 */
		GJ_VALIDATE_OFFSET(offset, sc);
		error = g_journal_sync_read(cp, bp, offset, buf);
		if (error != 0) {
			/*
			 * Not good. An error while reading a header means that
			 * we cannot read subsequent headers, and in
			 * consequence we cannot find the termination.
			 */
			GJ_DEBUG(0,
			    "Error while reading record header from %s.",
			    cp->provider->name);
			break;
		}

		error = g_journal_record_header_decode(buf, &rhdr);
		if (error != 0) {
			GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
			    (intmax_t)offset, error);
			/*
			 * This is not a record header.
			 * If we are lucky, this is the next journal header.
			 */
			error = g_journal_header_decode(buf, &jhdr);
			if (error != 0) {
				GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
				    (intmax_t)offset, error);
				/*
				 * Nope, this is not a journal header either,
				 * which basically means that the journal is
				 * not properly terminated.
				 */
				break;
			}
			/*
			 * OK. This is the header of _some_ journal. Now we
			 * need to verify that it is the header of the _next_
			 * journal.
			 */
			if (jhdr.jh_journal_id != id) {
				GJ_DEBUG(1, "Journal ID mismatch at %jd "
				    "(0x%08x != 0x%08x).", (intmax_t)offset,
				    (u_int)jhdr.jh_journal_id, (u_int)id);
				break;
			}

			/* Found termination. */
			found++;
			GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
			    (intmax_t)offset, (u_int)id);
			sc->sc_active.jj_offset = offset;
			sc->sc_journal_offset =
			    offset + cp->provider->sectorsize;
			sc->sc_journal_id = id;
			id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;

			while ((tbp = fbp) != NULL) {
				fbp = tbp->bio_next;
				GJ_LOGREQ(3, tbp, "Adding request.");
				g_journal_insert_bio(&sc->sc_inactive.jj_queue,
				    tbp, M_WAITOK);
			}

			/* Skip journal's header. */
			offset += cp->provider->sectorsize;
			continue;
		}

		/* Skip record's header. */
		offset += cp->provider->sectorsize;

		/*
		 * Add information about every record entry to the inactive
		 * queue.
		 */
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Init(&ctx);
		for (i = 0; i < rhdr.jrh_nentries; i++) {
			ent = &rhdr.jrh_entries[i];
			GJ_DEBUG(3, "Insert entry: %jd %jd.",
			    (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
			g_journal_insert(&fbp, ent->je_offset,
			    ent->je_offset + ent->je_length, ent->je_joffset,
			    NULL, M_WAITOK);
			if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
				u_char *buf2;

				/*
				 * TODO: Should use a faster function (like
				 * g_journal_sync_read()).
				 */
				buf2 = g_read_data(cp, offset, ent->je_length,
				    NULL);
				if (buf2 == NULL)
					GJ_DEBUG(0, "Cannot read data at %jd.",
					    (intmax_t)offset);
				else {
					MD5Update(&ctx, buf2, ent->je_length);
					g_free(buf2);
				}
			}
			/* Skip entry's data. */
			offset += ent->je_length;
		}
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
			MD5Final(sum, &ctx);
			if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
				GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
				    (intmax_t)offset);
			}
		}
	}
end:
	gj_free(bp->bio_data, cp->provider->sectorsize);
	g_destroy_bio(bp);

	/* Remove bios from the unterminated journal. */
	while ((tbp = fbp) != NULL) {
		fbp = tbp->bio_next;
		g_destroy_bio(tbp);
	}

	if (found < 1 && joffset > 0) {
		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
		    sc->sc_name);
		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
			sc->sc_inactive.jj_queue = tbp->bio_next;
			g_destroy_bio(tbp);
		}
		g_journal_initialize(sc);
		g_journal_mark_as_dirty(sc);
	} else {
		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
		g_journal_copy_start(sc);
	}
}

/*
 * Wait for requests.
 * If we have requests in the current queue, flush them after 3 seconds from
 * the last flush. In this way we don't wait forever (or for a journal switch)
 * to store partial records in the journal.
 */
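/*
 * Timeout arithmetic (illustrative numbers): with last_write = 100 s and
 * time_second = 102 s, the msleep() below gets (100 + 3 - 102) * hz = 1 * hz,
 * i.e. the worker sleeps at most one more second before the partial record
 * is flushed; a non-positive result means "flush right away".
 */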
static void
g_journal_wait(struct g_journal_softc *sc, time_t last_write)
{
	int error, timeout;

	GJ_DEBUG(3, "%s: enter", __func__);
	if (sc->sc_current_count == 0) {
		if (g_journal_debug < 2)
			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
		else {
			/*
			 * If we have debug turned on, show the number of
			 * elements in various queues.
			 */
			for (;;) {
				error = msleep(sc, &sc->sc_mtx, PRIBIO,
				    "gj:work", hz * 3);
				if (error == 0) {
					mtx_unlock(&sc->sc_mtx);
					break;
				}
				GJ_DEBUG(3, "Report: current count=%d",
				    sc->sc_current_count);
				GJ_DEBUG(3, "Report: flush count=%d",
				    sc->sc_flush_count);
				GJ_DEBUG(3, "Report: flush in progress=%d",
				    sc->sc_flush_in_progress);
				GJ_DEBUG(3, "Report: copy in progress=%d",
				    sc->sc_copy_in_progress);
				GJ_DEBUG(3, "Report: delayed=%d",
				    sc->sc_delayed_count);
			}
		}
		GJ_DEBUG(3, "%s: exit 1", __func__);
		return;
	}

	/*
	 * Flush even partial records every 3 seconds.
	 */
	timeout = (last_write + 3 - time_second) * hz;
	if (timeout <= 0) {
		mtx_unlock(&sc->sc_mtx);
		g_journal_flush(sc);
		g_journal_flush_send(sc);
		GJ_DEBUG(3, "%s: exit 2", __func__);
		return;
	}
	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
	if (error == EWOULDBLOCK)
		g_journal_flush_send(sc);
	GJ_DEBUG(3, "%s: exit 3", __func__);
}
static void
g_journal_worker(void *arg)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;
	struct bio *bp;
	time_t last_write;
	int type;

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sc = arg;

	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
		g_journal_initialize(sc);
	} else {
		g_journal_sync(sc);
	}
	/*
	 * Check if we can use BIO_FLUSH.
	 */
	sc->sc_bio_flush = 0;
	if (g_io_flush(sc->sc_jconsumer) == 0) {
		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
		    sc->sc_jconsumer->provider->name);
	} else {
		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
		    sc->sc_jconsumer->provider->name);
	}
	if (sc->sc_jconsumer != sc->sc_dconsumer) {
		if (g_io_flush(sc->sc_dconsumer) == 0) {
			sc->sc_bio_flush |= GJ_FLUSH_DATA;
			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
			    sc->sc_dconsumer->provider->name);
		} else {
			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
			    sc->sc_dconsumer->provider->name);
		}
	}

	gp = sc->sc_geom;
	g_topology_lock();
	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
	pp->mediasize = sc->sc_mediasize;
	/*
	 * There could be a problem when the data provider and journal provider
	 * have different sector sizes, but such a scenario is prevented on
	 * journal creation.
	 */
	pp->sectorsize = sc->sc_sectorsize;
	g_error_provider(pp, 0);
	g_topology_unlock();
	last_write = time_second;

	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

	for (;;) {
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_mtx);
		bp = bioq_first(&sc->sc_back_queue);
		if (bp != NULL)
			type = (bp->bio_cflags & GJ_BIO_MASK);
		if (bp == NULL) {
			bp = bioq_first(&sc->sc_regular_queue);
			if (bp != NULL)
				type = GJ_BIO_REGULAR;
		}
		if (bp == NULL) {
			if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
			    (sc->sc_flags & GJF_DEVICE_DESTROY)) {
				if (sc->sc_current_count > 0) {
					mtx_unlock(&sc->sc_mtx);
					g_journal_flush(sc);
					g_journal_flush_send(sc);
					continue;
				}
				if (sc->sc_flush_in_progress > 0)
					goto sleep;
				if (sc->sc_copy_in_progress > 0)
					goto sleep;
			}
			if (sc->sc_flags & GJF_DEVICE_SWITCH) {
				mtx_unlock(&sc->sc_mtx);
				g_journal_switch(sc);
				wakeup(&sc->sc_journal_copying);
				continue;
			}
			if (sc->sc_flags & GJF_DEVICE_DESTROY) {
				GJ_DEBUG(1, "Shutting down worker "
				    "thread for %s.", gp->name);
				sc->sc_worker = NULL;
				wakeup(&sc->sc_worker);
				mtx_unlock(&sc->sc_mtx);
				kproc_exit(0);
			}
sleep:
			g_journal_wait(sc, last_write);
			continue;
		}
		/*
		 * If we're in the middle of a switch, we need to delay all new
		 * write requests until it's done.
		 */
		if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
		    type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
			GJ_LOGREQ(2, bp, "WRITE on SWITCH");
			msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
			    "gj:switch", 0);
			continue;
		}
		if (type == GJ_BIO_REGULAR)
			bioq_remove(&sc->sc_regular_queue, bp);
		else
			bioq_remove(&sc->sc_back_queue, bp);
		mtx_unlock(&sc->sc_mtx);
		switch (type) {
		case GJ_BIO_REGULAR:
			/* Regular request. */
			switch (bp->bio_cmd) {
			case BIO_READ:
				g_journal_read(sc, bp, bp->bio_offset,
				    bp->bio_offset + bp->bio_length);
				break;
			case BIO_WRITE:
				last_write = time_second;
				g_journal_add_request(sc, bp);
				g_journal_flush_send(sc);
				break;
			default:
				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
			}
			break;
		case GJ_BIO_COPY:
			switch (bp->bio_cmd) {
			case BIO_READ:
				if (g_journal_copy_read_done(bp))
					g_journal_copy_send(sc);
				break;
			case BIO_WRITE:
				g_journal_copy_write_done(bp);
				g_journal_copy_send(sc);
				break;
			default:
				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
			}
			break;
		case GJ_BIO_JOURNAL:
			g_journal_flush_done(bp);
			g_journal_flush_send(sc);
			break;
		default:
			panic("Invalid bio (%d).", type);
		}
	}
}
static void
g_journal_destroy_event(void *arg, int flags __unused)
{
	struct g_journal_softc *sc;

	g_topology_assert();
	sc = arg;
	g_journal_destroy(sc);
}

static void
g_journal_timeout(void *arg)
{
	struct g_journal_softc *sc;

	sc = arg;
	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
	    sc->sc_name);
	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
}
2193 static struct g_geom *
2194 g_journal_create(struct g_class *mp, struct g_provider *pp,
2195 const struct g_journal_metadata *md)
2197 struct g_journal_softc *sc;
2199 struct g_consumer *cp;
2202 sc = NULL; /* gcc */
2204 g_topology_assert();
2206 * There are two possibilities:
2207 * 1. Data and both journals are on the same provider.
2208 * 2. Data and journals are all on separated providers.
2210 /* Look for journal device with the same ID. */
2211 LIST_FOREACH(gp, &mp->geom, geom) {
2215 if (sc->sc_id == md->md_id)
2220 else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
2221 GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
2224 if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
2225 GJ_DEBUG(0, "Invalid type on %s.", pp->name);
2228 if (md->md_type & GJ_TYPE_DATA) {
2229 GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
2232 if (md->md_type & GJ_TYPE_JOURNAL) {
2233 GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
2239 sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
2240 sc->sc_id = md->md_id;
2243 sc->sc_worker = NULL;
2245 gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
2246 gp->start = g_journal_start;
2247 gp->orphan = g_journal_orphan;
2248 gp->access = g_journal_access;
2250 gp->flags |= G_GEOM_VOLATILE_BIO;
2253 mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);
2255 bioq_init(&sc->sc_back_queue);
2256 bioq_init(&sc->sc_regular_queue);
2257 bioq_init(&sc->sc_delayed_queue);
2258 sc->sc_delayed_count = 0;
2259 sc->sc_current_queue = NULL;
2260 sc->sc_current_count = 0;
2261 sc->sc_flush_queue = NULL;
2262 sc->sc_flush_count = 0;
2263 sc->sc_flush_in_progress = 0;
2264 sc->sc_copy_queue = NULL;
2265 sc->sc_copy_in_progress = 0;
2266 sc->sc_inactive.jj_queue = NULL;
2267 sc->sc_active.jj_queue = NULL;
2269 sc->sc_rootmount = root_mount_hold("GJOURNAL");
2270 GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
2272 callout_init(&sc->sc_callout, 1);
2273 if (md->md_type != GJ_TYPE_COMPLETE) {
2275 * Journal and data are on separate providers.
2276 * At this point we have only one of them.
2277 * We setup a timeout in case the other part will not
2278 * appear, so we won't wait forever.
2280 callout_reset(&sc->sc_callout, 5 * hz,
2281 g_journal_timeout, sc);
	/* Remember type of the data provider. */
	if (md->md_type & GJ_TYPE_DATA)
		sc->sc_orig_type = md->md_type;
	sc->sc_type |= md->md_type;
	cp = NULL;

	if (md->md_type & GJ_TYPE_DATA) {
		if (md->md_flags & GJ_FLAG_CLEAN)
			sc->sc_flags |= GJF_DEVICE_CLEAN;
		if (md->md_flags & GJ_FLAG_CHECKSUM)
			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
		    pp->name, error));
		error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
			    error);
			g_journal_destroy(sc);
			return (NULL);
		}
		sc->sc_dconsumer = cp;
		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
		sc->sc_sectorsize = pp->sectorsize;
		sc->sc_jstart = md->md_jstart;
		sc->sc_jend = md->md_jend;
		if (md->md_provider[0] != '\0')
			sc->sc_flags |= GJF_DEVICE_HARDCODED;
		sc->sc_journal_offset = md->md_joffset;
		sc->sc_journal_id = md->md_jid;
		sc->sc_journal_previous_id = md->md_jid;
	}
	if (md->md_type & GJ_TYPE_JOURNAL) {
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			error = g_attach(cp, pp);
			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
			    pp->name, error));
			error = g_access(cp, 1, 1, 1);
			if (error != 0) {
				GJ_DEBUG(0, "Cannot access %s (error=%d).",
				    pp->name, error);
				g_journal_destroy(sc);
				return (NULL);
			}
		} else {
			/*
			 * Journal is on the same provider as data, which means
			 * that data provider ends where journal starts.
			 */
			sc->sc_mediasize = md->md_jstart;
		}
		sc->sc_jconsumer = cp;
	}

	/* Start switcher kproc if needed. */
	if (g_journal_switcher_proc == NULL)
		g_journal_start_switcher(mp);

	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
		/* Journal is not complete yet. */
		return (gp);
	} else {
		/* Journal complete, cancel timeout. */
		callout_drain(&sc->sc_callout);
	}

	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
	    "g_journal %s", sc->sc_name);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
		    pp->name);
		g_journal_destroy(sc);
		return (NULL);
	}

	return (gp);
}
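/*
 * Event handler used by g_journal_destroy() to detach and destroy a
 * consumer from the event thread, after the withering geom has let go
 * of it.
 */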
static void
g_journal_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	g_detach(cp);
	g_destroy_consumer(cp);
}
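/*
 * Destroy a journal device: refuse while the provider is still open,
 * flush everything that is pending, wait for the worker thread to exit
 * and tear the geom down, marking the device clean on the way out.
 */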
static int
g_journal_destroy(struct g_journal_softc *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL) {
		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
		g_error_provider(pp, ENXIO);

		g_journal_flush(sc);
		g_journal_flush_send(sc);
		g_journal_switch(sc);
	}

	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);

	g_topology_unlock();

	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	wakeup(sc);
	while (sc->sc_worker != NULL)
		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
	mtx_unlock(&sc->sc_mtx);

	if (pp != NULL) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		g_journal_metadata_update(sc);
		g_topology_lock();
		g_wither_provider(pp, ENXIO);
	} else
		g_topology_lock();
	mtx_destroy(&sc->sc_mtx);

	if (sc->sc_current_count != 0) {
		GJ_DEBUG(0, "Warning! Number of current requests %d.",
		    sc->sc_current_count);
	}

	gp->softc = NULL;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (cp->acr + cp->acw + cp->ace > 0)
			g_access(cp, -1, -1, -1);
		/*
		 * We keep all consumers open for writing, so if we detached
		 * and destroyed the consumer here, the provider would be
		 * offered for tasting and the journal would be started
		 * again.  Sending an event instead prevents this from
		 * happening.
		 */
		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
	}
	g_wither_geom(gp, ENXIO);
	free(sc, M_JOURNAL);
	return (0);
}
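/*
 * Tasting: read and verify the on-disk metadata from a provider and,
 * when it matches, create (or complete) the journal device.  The
 * temporary taste geom must never see an orphan event.
 */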
static void
g_journal_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

static struct g_geom *
g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	GJ_DEBUG(2, "Tasting %s.", pp->name);
	if (pp->geom->class == mp)
		return (NULL);

	gp = g_new_geomf(mp, "journal:taste");
	/* This orphan function should never be called. */
	gp->orphan = g_journal_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_journal_metadata_read(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if (g_journal_debug >= 2)
		journal_metadata_dump(&md);

	gp = g_journal_create(mp, pp, &md);
	return (gp);
}
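/*
 * Look up a complete, not-being-destroyed journal device by the name of
 * either its geom or its provider; an optional "/dev/" prefix is
 * stripped first.
 */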
static struct g_journal_softc *
g_journal_find_device(struct g_class *mp, const char *name)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;

	if (strncmp(name, "/dev/", 5) == 0)
		name += 5;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		pp = LIST_FIRST(&gp->provider);
		if (strcmp(sc->sc_name, name) == 0)
			return (sc);
		if (pp != NULL && strcmp(pp->name, name) == 0)
			return (sc);
	}
	return (NULL);
}
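/*
 * Handle the "destroy"/"stop" verbs from the userland control utility:
 * destroy every journal device named in the request.
 */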
static void
g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	struct g_journal_softc *sc;
	const char *name;
	char param[16];
	int *nargs;
	int error, i;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument.", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument.", i);
			return;
		}
		sc = g_journal_find_device(mp, name);
		if (sc == NULL) {
			gctl_error(req, "No such device: %s.", name);
			continue;
		}
		error = g_journal_destroy(sc);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
			return;
		}
	}
}
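/*
 * Handle the "sync" verb: wake the switcher thread and wait until the
 * requested journal switch has completed.
 */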
static void
g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
{

	g_topology_assert();
	g_topology_unlock();
	g_journal_sync_requested++;
	wakeup(&g_journal_switcher_state);
	while (g_journal_sync_requested > 0)
		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
	g_topology_lock();
}
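/*
 * Entry point for gctl requests.  Verify that userland and kernel speak
 * the same G_JOURNAL_VERSION before dispatching on the verb.
 */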
static void
g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_JOURNAL_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
		g_journal_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "sync") == 0) {
		g_journal_ctl_sync(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}
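/*
 * Report per-geom, per-consumer and per-provider details for the XML
 * configuration dump (the kern.geom.confxml sysctl).
 */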
static void
g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_journal_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		int first = 1;

		sbuf_printf(sb, "%s<Role>", indent);
		if (cp == sc->sc_dconsumer) {
			sbuf_printf(sb, "Data");
			first = 0;
		}
		if (cp == sc->sc_jconsumer) {
			if (!first)
				sbuf_printf(sb, ",");
			sbuf_printf(sb, "Journal");
		}
		sbuf_printf(sb, "</Role>\n");
		if (cp == sc->sc_jconsumer) {
			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
			    (intmax_t)sc->sc_jstart);
			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
			    (intmax_t)sc->sc_jend);
		}
	} else {
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
	}
}
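/*
 * Shutdown hook: destroy all journal geoms on system shutdown so they
 * end up marked clean.  Skipped on panic, when no I/O should be issued.
 */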
static eventhandler_tag g_journal_event_shutdown = NULL;
static eventhandler_tag g_journal_event_lowmem = NULL;

static void
g_journal_shutdown(void *arg, int howto __unused)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;

	if (panicstr != NULL)
		return;
	mp = arg;
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if (gp->softc == NULL)
			continue;
		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
		g_journal_destroy(gp->softc);
	}
	g_topology_unlock();
}
/*
 * Free cached requests from the inactive queue in case of low memory.
 * We free GJ_FREE_AT_ONCE elements at once.
 */
#define	GJ_FREE_AT_ONCE	4
static void
g_journal_lowmem(void *arg, int howto __unused)
{
	struct g_journal_softc *sc;
	struct g_class *mp;
	struct g_geom *gp;
	struct bio *bp;
	u_int nfree = GJ_FREE_AT_ONCE;

	g_journal_stats_low_mem++;
	mp = arg;
	g_topology_lock();
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
			continue;
		mtx_lock(&sc->sc_mtx);
		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
		    nfree--, bp = bp->bio_next) {
			/*
			 * It is safe to free bio_data here, because:
			 * 1. If bio_data is NULL it will be read from the
			 *    inactive journal.
			 * 2. If bp is sent down, it is first removed from the
			 *    inactive queue, so it's impossible to free the
			 *    data from under an in-flight bio.
			 * On the other hand, freeing elements from the active
			 * queue is not safe.
			 */
			if (bp->bio_data != NULL) {
				GJ_DEBUG(2, "Freeing data from %s.",
				    sc->sc_name);
				gj_free(bp->bio_data, bp->bio_length);
				bp->bio_data = NULL;
			}
		}
		mtx_unlock(&sc->sc_mtx);
		if (nfree == 0)
			break;
	}
	g_topology_unlock();
}
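/*
 * Class initialization: size the journal cache from vm_kmem_size and
 * register the shutdown and low-memory event handlers.
 */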
static void g_journal_switcher(void *arg);

static void
g_journal_init(struct g_class *mp)
{

	/* Pick a conservative value if the configured value is unusable. */
	if (g_journal_cache_divisor <= 0 ||
	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
		g_journal_cache_divisor = 5;
	}
	if (g_journal_cache_limit > 0) {
		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
		g_journal_cache_low =
		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
	}
	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_shutdown == NULL)
		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_lowmem == NULL)
		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
}
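/*
 * Class teardown: deregister the event handlers and stop the switcher
 * thread if it was ever started.
 */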
static void
g_journal_fini(struct g_class *mp)
{

	if (g_journal_event_shutdown != NULL) {
		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
		    g_journal_event_shutdown);
	}
	if (g_journal_event_lowmem != NULL)
		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
	if (g_journal_switcher_proc != NULL)
		g_journal_stop_switcher();
}

DECLARE_GEOM_CLASS(g_journal_class, g_journal);
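/*
 * Map a file system type name (e.g. "ufs") to its journal descriptor in
 * g_journal_filesystems[].
 */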
static const struct g_journal_desc *
g_journal_find_desc(const char *fstype)
{
	const struct g_journal_desc *desc;
	int i;

	for (desc = g_journal_filesystems[i = 0]; desc != NULL;
	    desc = g_journal_filesystems[++i]) {
		if (strcmp(desc->jd_fstype, fstype) == 0)
			break;
	}
	return (desc);
}
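/*
 * Request a journal switch and sleep until the worker thread has
 * completed it.  Called with the softc mutex held.
 */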
static void
g_journal_switch_wait(struct g_journal_softc *sc)
{
	struct bintime bt;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	if (g_journal_debug >= 2) {
		if (sc->sc_flush_in_progress > 0) {
			GJ_DEBUG(2, "%d requests flushing.",
			    sc->sc_flush_in_progress);
		}
		if (sc->sc_copy_in_progress > 0) {
			GJ_DEBUG(2, "%d requests copying.",
			    sc->sc_copy_in_progress);
		}
		if (sc->sc_flush_count > 0) {
			GJ_DEBUG(2, "%d requests to flush.",
			    sc->sc_flush_count);
		}
		if (sc->sc_delayed_count > 0) {
			GJ_DEBUG(2, "%d requests delayed.",
			    sc->sc_delayed_count);
		}
	}
	g_journal_stats_switches++;
	if (sc->sc_copy_in_progress > 0)
		g_journal_stats_wait_for_copy++;
	GJ_TIMER_START(1, &bt);
	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
	sc->sc_flags |= GJF_DEVICE_SWITCH;
	wakeup(sc);
	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
		    "gj:switch", 0);
	}
	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
}
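/*
 * Perform a journal switch on every complete journal device.  Mounted
 * file systems are synced and write-suspended around the switch so the
 * new journal starts from a consistent on-disk state; devices without a
 * matching mount are switched afterwards in a second pass.
 */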
static void
g_journal_do_switch(struct g_class *classp)
{
	struct g_journal_softc *sc;
	const struct g_journal_desc *desc;
	struct g_geom *gp;
	struct mount *mp;
	struct bintime bt;
	char *mountpoint;
	int error, save;

	g_topology_lock();
	LIST_FOREACH(gp, &classp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		mtx_lock(&sc->sc_mtx);
		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
		mtx_unlock(&sc->sc_mtx);
	}
	g_topology_unlock();

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_gjprovider == NULL)
			continue;
		if (mp->mnt_flag & MNT_RDONLY)
			continue;
		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
		if (desc == NULL)
			continue;
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
			continue;
		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */

		g_topology_lock();
		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
		g_topology_unlock();

		if (sc == NULL) {
			GJ_DEBUG(0, "Cannot find journal geom for %s.",
			    mp->mnt_gjprovider);
			goto next;
		} else if (JEMPTY(sc)) {
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
			mtx_unlock(&sc->sc_mtx);
			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
			goto next;
		}

		mountpoint = mp->mnt_stat.f_mntonname;

		error = vn_start_write(NULL, &mp, V_WAIT);
		if (error != 0) {
			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
			    mountpoint, error);
			goto next;
		}

		save = curthread_pflags_set(TDP_SYNCIO);

		GJ_TIMER_START(1, &bt);
		vfs_msync(mp, MNT_NOWAIT);
		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);

		GJ_TIMER_START(1, &bt);
		error = VFS_SYNC(mp, MNT_NOWAIT);
		if (error == 0)
			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
		else {
			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
			    mountpoint, error);
		}

		curthread_pflags_restore(save);

		vn_finished_write(mp);

		if (error != 0)
			goto next;

		/*
		 * Send BIO_FLUSH before freezing the file system, so it can be
		 * faster after the freeze.
		 */
		GJ_TIMER_START(1, &bt);
		g_journal_flush_cache(sc);
		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);

		GJ_TIMER_START(1, &bt);
		error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
			    mountpoint, error);
			goto next;
		}

		error = desc->jd_clean(mp);
		if (error != 0)
			goto next;

		mtx_lock(&sc->sc_mtx);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);

		vfs_write_resume(mp, 0);
next:
		mtx_lock(&mountlist_mtx);
		vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);

	sc = NULL;
	for (;;) {
		g_topology_lock();
		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
			sc = gp->softc;
			if (sc == NULL)
				continue;
			mtx_lock(&sc->sc_mtx);
			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
				break;
			}
			mtx_unlock(&sc->sc_mtx);
			sc = NULL;
		}
		g_topology_unlock();
		if (sc == NULL)
			break;
		mtx_assert(&sc->sc_mtx, MA_OWNED);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);
	}
}
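/*
 * Start the global switcher kproc; called when the first journal device
 * is created.
 */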
static void
g_journal_start_switcher(struct g_class *mp)
{
	int error;

	g_topology_assert();
	MPASS(g_journal_switcher_proc == NULL);
	g_journal_switcher_state = GJ_SWITCHER_WORKING;
	error = kproc_create(g_journal_switcher, mp, &g_journal_switcher_proc,
	    0, 0, "g_journal switcher");
	KASSERT(error == 0, ("Cannot create switcher thread."));
}
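/*
 * Ask the switcher kproc to exit and wait until it acknowledges.
 */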
static void
g_journal_stop_switcher(void)
{
	g_topology_assert();

	MPASS(g_journal_switcher_proc != NULL);
	g_journal_switcher_state = GJ_SWITCHER_DIE;
	wakeup(&g_journal_switcher_state);
	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
	GJ_DEBUG(1, "Switcher died.");
	g_journal_switcher_proc = NULL;
}
/*
 * TODO: Kill switcher thread on last geom destruction?
 */
static void
g_journal_switcher(void *arg)
{
	struct g_class *mp;
	struct bintime bt;
	int error;

	mp = arg;
	curthread->td_pflags |= TDP_NORUNNINGBUF;
	for (;;) {
		g_journal_switcher_wokenup = 0;
		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
		    g_journal_switch_time * hz);
		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
			g_journal_switcher_state = GJ_SWITCHER_DIED;
			GJ_DEBUG(1, "Switcher exiting.");
			wakeup(&g_journal_switcher_state);
			kproc_exit(0);
		}
		if (error == 0 && g_journal_sync_requested == 0) {
			GJ_DEBUG(1, "Out of cache, force switch (used=%jd "
			    "limit=%jd).", (intmax_t)g_journal_cache_used,
			    (intmax_t)g_journal_cache_limit);
		}
		GJ_TIMER_START(1, &bt);
		g_journal_do_switch(mp);
		GJ_TIMER_STOP(1, &bt, "Entire switch time");
		if (g_journal_sync_requested > 0) {
			g_journal_sync_requested = 0;
			wakeup(&g_journal_sync_requested);
		}
	}
}