2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2004 Max Khon
5 * Copyright (c) 2014 Juniper Networks, Inc.
6 * Copyright (c) 2006-2016 Maxim Sobolev <sobomax@FreeBSD.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 #include "opt_zstdio.h"
37 #include <sys/param.h>
39 #include <sys/endian.h>
40 #include <sys/errno.h>
41 #include <sys/kernel.h>
43 #include <sys/mutex.h>
44 #include <sys/malloc.h>
45 #include <sys/sysctl.h>
46 #include <sys/systm.h>
47 #include <sys/kthread.h>
49 #include <geom/geom.h>
51 #include <geom/uzip/g_uzip.h>
52 #include <geom/uzip/g_uzip_cloop.h>
53 #include <geom/uzip/g_uzip_softc.h>
54 #include <geom/uzip/g_uzip_dapi.h>
55 #include <geom/uzip/g_uzip_zlib.h>
56 #include <geom/uzip/g_uzip_lzma.h>
58 #include <geom/uzip/g_uzip_zstd.h>
60 #include <geom/uzip/g_uzip_wrkthr.h>
/* malloc(9) type under which all GEOM_UZIP memory is accounted. */
62 MALLOC_DEFINE(M_GEOM_UZIP, "geom_uzip", "GEOM UZIP data structures");
/* Advertise module availability through the kern.features sysctl tree. */
64 FEATURE(geom_uzip, "GEOM read-only compressed disks support");
/*
 * NOTE(review): fragment of the per-cluster TOC entry (struct g_uzip_blk);
 * the surrounding member declarations are not visible in this extract.
 * `padded' marks a cluster whose stored length included trailing zero
 * padding (set in g_uzip_parse_toc()).
 */
70 unsigned char padded:1;
/* Sentinel: cluster length not yet known (also "no debug cluster set"). */
71 #define BLEN_UNDEF UINT32_MAX
/* NOTE(review): ABS() evaluates its argument twice; avoid side effects. */
75 #define ABS(a) ((a) < 0 ? -(a) : (a))
/*
 * True when cluster `mcn' lies within `ilen' clusters of base cluster
 * `bcn' (forward for positive ilen, backward for negative); false when
 * bcn is the BLEN_UNDEF sentinel.  NOTE(review): `mcn' and `bcn' are used
 * unparenthesized inside the expansion, and all arguments are evaluated
 * multiple times -- pass only simple, side-effect-free expressions.
 * NOTE(review): the closing parentheses of this macro appear to be
 * missing from this extract.
 */
78 #define BLK_IN_RANGE(mcn, bcn, ilen) \
79 (((bcn) != BLEN_UNDEF) && ( \
80 ((ilen) >= 0 && (mcn >= bcn) && (mcn <= ((intmax_t)(bcn) + (ilen)))) || \
81 ((ilen) < 0 && (mcn <= bcn) && (mcn >= ((intmax_t)(bcn) + (ilen)))) \
84 #ifdef GEOM_UZIP_DEBUG
85 # define GEOM_UZIP_DBG_DEFAULT 3
87 # define GEOM_UZIP_DBG_DEFAULT 0
91 #define GUZ_DBG_INFO 2
/* Name suffix appended to the underlying provider, e.g. "da0.uzip". */
95 #define GUZ_DEV_SUFX ".uzip"
96 #define GUZ_DEV_NAME(p) (p GUZ_DEV_SUFX)
/*
 * Attach policy: fnmatch(9)-style patterns controlling which providers
 * this class tastes.  By default everything is considered except names
 * that already carry the ".uzip" suffix (prevents recursive attachment).
 * Both are settable as loader tunables.
 */
98 static char g_uzip_attach_to[MAXPATHLEN] = {"*"};
99 static char g_uzip_noattach_to[MAXPATHLEN] = {GUZ_DEV_NAME("*")};
100 TUNABLE_STR("kern.geom.uzip.attach_to", g_uzip_attach_to,
101 sizeof(g_uzip_attach_to));
102 TUNABLE_STR("kern.geom.uzip.noattach_to", g_uzip_noattach_to,
103 sizeof(g_uzip_noattach_to));
/* kern.geom.uzip sysctl subtree: debug verbosity and a focus cluster. */
105 SYSCTL_DECL(_kern_geom);
106 SYSCTL_NODE(_kern_geom, OID_AUTO, uzip, CTLFLAG_RW, 0, "GEOM_UZIP stuff");
107 static u_int g_uzip_debug = GEOM_UZIP_DBG_DEFAULT;
108 SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug, CTLFLAG_RWTUN, &g_uzip_debug, 0,
109 "Debug level (0-4)");
/*
 * When set to a cluster number, DPRINTF_BLK/DPRINTF_BRNG also log
 * operations within +/-8 clusters of it regardless of the debug level.
 */
110 static u_int g_uzip_debug_block = BLEN_UNDEF;
111 SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug_block, CTLFLAG_RWTUN,
112 &g_uzip_debug_block, 0, "Debug operations around specific cluster#")
/*
 * Debug logging macros.  NOTE(review): the printf bodies and closing
 * braces of all three macros are missing from this extract; as written
 * they are bare `if' heads without do { } while (0) wrapping, which is
 * a dangling-else hazard -- verify against the full source.
 */
114 #define DPRINTF(lvl, a) \
115 if ((lvl) <= g_uzip_debug) { \
/* Log when the level allows it OR cluster `cn' is near the debug block. */
118 #define DPRINTF_BLK(lvl, cn, a) \
119 if ((lvl) <= g_uzip_debug || \
120 BLK_IN_RANGE(cn, g_uzip_debug_block, 8) || \
121 BLK_IN_RANGE(cn, g_uzip_debug_block, -8)) { \
/* Same, for a half-open cluster range [bcn, ecn); asserts bcn < ecn. */
124 #define DPRINTF_BRNG(lvl, bcn, ecn, a) \
125 KASSERT(bcn < ecn, ("DPRINTF_BRNG: invalid range (%ju, %ju)", \
126 (uintmax_t)bcn, (uintmax_t)ecn)); \
127 if (((lvl) <= g_uzip_debug) || \
128 BLK_IN_RANGE(g_uzip_debug_block, bcn, \
129 (intmax_t)ecn - (intmax_t)bcn)) { \
133 #define UZIP_CLASS_NAME "UZIP"
136 * Maximum allowed valid block size (to prevent foot-shooting)
138 #define MAX_BLKSZ (MAXPHYS)
/* cloop images start with a shell-script shebang; used as the magic. */
140 static char CLOOP_MAGIC_START[] = "#!/bin/sh\n";
/* Forward declarations for the read-completion / worker-thread pair. */
142 static void g_uzip_read_done(struct bio *bp);
143 static void g_uzip_do(struct g_uzip_softc *, struct bio *bp);
/*
 * Tear down a softc: log request statistics, signal the worker thread to
 * shut down and wait until it flags GUZ_EXITING, then release the
 * decompressor, TOC, mutexes and buffers.
 * NOTE(review): the function head, braces and the msleep() timeout
 * argument are missing from this extract.
 */
146 g_uzip_softc_free(struct g_uzip_softc *sc, struct g_geom *gp)
150 DPRINTF(GUZ_DBG_INFO, ("%s: %d requests, %d cached\n",
151 gp->name, sc->req_total, sc->req_cached));
/* Hand-shake the worker thread's exit under queue_mtx. */
154 mtx_lock(&sc->queue_mtx);
155 sc->wrkthr_flags |= GUZ_SHUTDOWN;
157 while (!(sc->wrkthr_flags & GUZ_EXITING)) {
158 msleep(sc->procp, &sc->queue_mtx, PRIBIO, "guzfree",
161 mtx_unlock(&sc->queue_mtx);
/* Release resources roughly in reverse order of their creation. */
163 sc->dcp->free(sc->dcp);
164 free(sc->toc, M_GEOM_UZIP);
165 mtx_destroy(&sc->queue_mtx);
166 mtx_destroy(&sc->last_mtx);
167 free(sc->last_buf, M_GEOM_UZIP);
168 free(sc, M_GEOM_UZIP);
/*
 * Try to satisfy (part of) read `bp' from the single-cluster cache
 * (sc->last_buf / sc->last_blk, guarded by last_mtx).  On a hit the copy
 * advances bio_completed and shrinks bio_resid.  The return value tells
 * g_uzip_request() whether the request was fully satisfied; the actual
 * return statements are not visible in this extract.
 */
172 g_uzip_cached(struct g_geom *gp, struct bio *bp)
174 struct g_uzip_softc *sc;
176 size_t blk, blkofs, usz;
/* Resume point of the request and the cluster it falls into. */
179 ofs = bp->bio_offset + bp->bio_completed;
180 blk = ofs / sc->blksz;
181 mtx_lock(&sc->last_mtx);
182 if (blk == sc->last_blk) {
/* Copy the cached cluster's tail, capped by the request residual. */
183 blkofs = ofs % sc->blksz;
184 usz = sc->blksz - blkofs;
185 if (bp->bio_resid < usz)
187 memcpy(bp->bio_data + bp->bio_completed, sc->last_buf + blkofs,
190 mtx_unlock(&sc->last_mtx);
192 DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: offset=%jd: got %jd bytes "
193 "from cache\n", __func__, gp->name, bp, (intmax_t)ofs,
196 bp->bio_completed += usz;
197 bp->bio_resid -= usz;
199 if (bp->bio_resid == 0) {
204 mtx_unlock(&sc->last_mtx);
/* First byte past cluster bi's compressed data on the lower provider. */
209 #define BLK_ENDS(sc, bi) ((sc)->toc[(bi)].offset + \
210 (sc)->toc[(bi)].blen)
/* Cluster bi starts exactly where cluster bi-1 ends (contiguous on disk). */
212 #define BLK_IS_CONT(sc, bi) (BLK_ENDS((sc), (bi) - 1) == \
213 (sc)->toc[(bi)].offset)
/* Zero-length TOC entry: a hole that decompresses to all-zero data. */
214 #define BLK_IS_NIL(sc, bi) ((sc)->toc[(bi)].blen == 0)
/* Round a cluster's byte offset down to a provider sector boundary... */
216 #define TOFF_2_BOFF(sc, pp, bi) ((sc)->toc[(bi)].offset - \
217 (sc)->toc[(bi)].offset % (pp)->sectorsize)
/* ...and the byte length up so the read covers whole sectors. */
218 #define TLEN_2_BLEN(sc, pp, bp, ei) roundup(BLK_ENDS((sc), (ei)) - \
219 (bp)->bio_offset, (pp)->sectorsize)
/*
 * Translate the next uncompleted portion of uncompressed request `bp'
 * into one sector-aligned read of compressed clusters from the lower
 * provider.  Leading Nil (zero-length) clusters are zero-filled in place;
 * the remaining span is read via a cloned bio whose completion routine is
 * g_uzip_read_done().  NOTE(review): declarations of bp2, several return
 * statements and brace lines are missing from this extract.
 */
222 g_uzip_request(struct g_geom *gp, struct bio *bp)
224 struct g_uzip_softc *sc;
226 struct g_consumer *cp;
227 struct g_provider *pp;
228 off_t ofs, start_blk_ofs;
229 size_t i, start_blk, end_blk, zsize;
/* Fast path: single-cluster cache may satisfy the request entirely. */
231 if (g_uzip_cached(gp, bp) != 0)
236 cp = LIST_FIRST(&gp->consumer);
/* [start_blk, end_blk) is the cluster range still to be produced. */
239 ofs = bp->bio_offset + bp->bio_completed;
240 start_blk = ofs / sc->blksz;
241 KASSERT(start_blk < sc->nblocks, ("start_blk out of range"));
242 end_blk = howmany(ofs + bp->bio_resid, sc->blksz);
243 KASSERT(end_blk <= sc->nblocks, ("end_blk out of range"));
245 for (; BLK_IS_NIL(sc, start_blk) && start_blk < end_blk; start_blk++) {
246 /* Fill in any leading Nil blocks */
247 start_blk_ofs = ofs % sc->blksz;
248 zsize = MIN(sc->blksz - start_blk_ofs, bp->bio_resid);
249 DPRINTF_BLK(GUZ_DBG_IO, start_blk, ("%s/%s: %p/%ju: "
250 "filling %ju zero bytes\n", __func__, gp->name, gp,
251 (uintmax_t)bp->bio_completed, (uintmax_t)zsize));
252 bzero(bp->bio_data + bp->bio_completed, zsize);
253 bp->bio_completed += zsize;
254 bp->bio_resid -= zsize;
258 if (start_blk == end_blk) {
259 KASSERT(bp->bio_resid == 0, ("bp->bio_resid is invalid"));
261 * No non-Nil data is left, complete request immediately.
263 DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: all done returning %ju "
264 "bytes\n", __func__, gp->name, gp,
265 (uintmax_t)bp->bio_completed));
/* Shrink the range at the first on-disk discontinuity. */
270 for (i = start_blk + 1; i < end_blk; i++) {
271 /* Trim discontinuous areas if any */
272 if (!BLK_IS_CONT(sc, i)) {
278 DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
279 "start=%u (%ju[%jd]), end=%u (%ju)\n", __func__, gp->name, bp,
280 (u_int)start_blk, (uintmax_t)sc->toc[start_blk].offset,
281 (intmax_t)sc->toc[start_blk].blen,
282 (u_int)end_blk, (uintmax_t)BLK_ENDS(sc, end_blk - 1)));
/* Clone bp and aim it at the sector-aligned compressed span. */
284 bp2 = g_clone_bio(bp);
286 g_io_deliver(bp, ENOMEM);
289 bp2->bio_done = g_uzip_read_done;
291 bp2->bio_offset = TOFF_2_BOFF(sc, pp, start_blk);
293 bp2->bio_length = TLEN_2_BLEN(sc, pp, bp2, end_blk - 1);
294 if (bp2->bio_length <= MAXPHYS) {
297 if (end_blk == (start_blk + 1)) {
303 DPRINTF(GUZ_DBG_IO, ("%s/%s: bp2->bio_length = %jd, "
304 "bp2->bio_offset = %jd\n", __func__, gp->name,
305 (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));
/* Worker thread frees this buffer in g_uzip_do(). */
307 bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT);
308 if (bp2->bio_data == NULL) {
310 g_io_deliver(bp, ENOMEM);
314 DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
315 "reading %jd bytes from offset %jd\n", __func__, gp->name, bp,
316 (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));
318 g_io_request(bp2, cp);
/*
 * Completion routine for the low-level compressed read issued by
 * g_uzip_request(): queue the finished bio for the worker thread, which
 * decompresses it in g_uzip_do().  NOTE(review): the wakeup of the worker
 * is presumably in the lines missing from this extract -- verify.
 */
323 g_uzip_read_done(struct bio *bp)
327 struct g_uzip_softc *sc;
329 bp2 = bp->bio_parent;
330 gp = bp2->bio_to->geom;
333 mtx_lock(&sc->queue_mtx);
334 bioq_disksort(&sc->bio_queue, bp);
335 mtx_unlock(&sc->queue_mtx);
/*
 * Return non-zero if every one of `size' bytes at `memory' equals `val',
 * zero otherwise (an all-bytes-match predicate, NOT memcmp(3)-style).
 *
 * Fix: the original unconditionally evaluated memcmp(mm, mm + 1, size - 1),
 * so size == 0 underflowed the length to SIZE_MAX (and dereferenced *mm on
 * an empty region).  An empty region is treated as vacuously uniform.
 */
static int
g_uzip_memvcmp(const void *memory, unsigned char val, size_t size)
{
	const unsigned char *mm;

	if (size == 0)
		return (1);
	mm = (const unsigned char *)memory;
	return (*mm == val && (size == 1 ||
	    memcmp(mm, mm + 1, size - 1) == 0));
}
/*
 * Worker-thread body for one completed low-level read `bp': decompress
 * consecutive clusters into the parent request bp2, maintaining the
 * single-cluster cache under last_mtx.  When the parent still has
 * residual data (or hit a discontinuity), the request is re-armed via
 * g_uzip_request(); otherwise it is delivered.  NOTE(review): numerous
 * lines (declarations, braces, cache-update statements) are missing from
 * this extract.
 */
349 g_uzip_do(struct g_uzip_softc *sc, struct bio *bp)
352 struct g_provider *pp;
353 struct g_consumer *cp;
357 size_t blk, blkofs, len, ulen, firstblk;
360 bp2 = bp->bio_parent;
361 gp = bp2->bio_to->geom;
363 cp = LIST_FIRST(&gp->consumer);
/* Propagate a lower-level I/O error straight to the parent. */
366 bp2->bio_error = bp->bio_error;
367 if (bp2->bio_error != 0)
370 /* Make sure there's forward progress. */
371 if (bp->bio_completed == 0) {
372 bp2->bio_error = ECANCELED;
/* Locate the current cluster and the offset of the parent within it. */
376 ofs = bp2->bio_offset + bp2->bio_completed;
377 firstblk = blk = ofs / sc->blksz;
378 blkofs = ofs % sc->blksz;
/* `data' skips the sector-alignment slack added by TOFF_2_BOFF(). */
379 data = bp->bio_data + sc->toc[blk].offset % pp->sectorsize;
380 data2 = bp2->bio_data + bp2->bio_completed;
381 while (bp->bio_completed && bp2->bio_resid) {
/* Backref'ed (deduplicated) clusters are not contiguous: re-request. */
382 if (blk > firstblk && !BLK_IS_CONT(sc, blk)) {
383 DPRINTF_BLK(GUZ_DBG_IO, blk, ("%s/%s: %p: backref'ed "
384 "cluster #%u requested, looping around\n",
385 __func__, gp->name, bp2, (u_int)blk));
388 ulen = MIN(sc->blksz - blkofs, bp2->bio_resid);
389 len = sc->toc[blk].blen;
390 DPRINTF(GUZ_DBG_IO, ("%s/%s: %p/%ju: data2=%p, ulen=%u, "
391 "data=%p, len=%u\n", __func__, gp->name, gp,
392 bp->bio_completed, data2, (u_int)ulen, data, (u_int)len));
394 /* All zero block: no cache update */
397 } else if (len <= bp->bio_completed) {
398 mtx_lock(&sc->last_mtx);
399 err = sc->dcp->decompress(sc->dcp, gp->name, data,
401 if (err != 0 && sc->toc[blk].last != 0) {
403 * Last block decompression has failed, check
404 * if it's just zero padding.
/*
 * NOTE(review): g_uzip_memvcmp() as implemented above returns non-zero
 * when every byte matches, so "== 0" here takes the zero-padding path
 * when the data is NOT all zeroes -- polarity looks inverted relative to
 * that implementation; verify against upstream sources.
 */
406 if (g_uzip_memvcmp(data, '\0', len) == 0) {
407 sc->toc[blk].blen = 0;
409 mtx_unlock(&sc->last_mtx);
416 mtx_unlock(&sc->last_mtx);
417 bp2->bio_error = EILSEQ;
418 DPRINTF(GUZ_DBG_ERR, ("%s/%s: decompress"
419 "(%p, %ju, %ju) failed\n", __func__,
420 gp->name, sc->dcp, (uintmax_t)blk,
/* Copy the freshly decompressed data out of the cache buffer. */
425 memcpy(data2, sc->last_buf + blkofs, ulen);
426 mtx_unlock(&sc->last_mtx);
427 err = sc->dcp->rewind(sc->dcp, gp->name);
429 bp2->bio_error = EILSEQ;
430 DPRINTF(GUZ_DBG_ERR, ("%s/%s: rewind(%p) "
431 "failed\n", __func__, gp->name, sc->dcp));
/* Account progress on both the parent and the compressed read. */
439 bp2->bio_completed += ulen;
440 bp2->bio_resid -= ulen;
441 bp->bio_completed -= len;
447 /* Finish processing the request. */
448 free(bp->bio_data, M_GEOM_UZIP);
450 if (bp2->bio_error != 0 || bp2->bio_resid == 0)
451 g_io_deliver(bp2, bp2->bio_error);
453 g_uzip_request(gp, bp2);
/*
 * GEOM start method: forward "MNT:" BIO_GETATTR requests to the lower
 * provider, reject any command other than BIO_READ with EOPNOTSUPP, and
 * start the read state machine via g_uzip_request().  NOTE(review): the
 * declarations of gp/bp2 and several braces/returns are missing from
 * this extract.
 */
457 g_uzip_start(struct bio *bp)
459 struct g_provider *pp;
461 struct g_uzip_softc *sc;
466 DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: cmd=%d, offset=%jd, length=%jd, "
467 "buffer=%p\n", __func__, gp->name, bp, bp->bio_cmd,
468 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length, bp->bio_data));
473 if (bp->bio_cmd == BIO_GETATTR) {
475 struct g_consumer *cp;
477 struct g_provider *pp;
479 /* pass on MNT:* requests and ignore others */
480 if (strncmp(bp->bio_attribute, "MNT:", 4) == 0) {
481 bp2 = g_clone_bio(bp);
483 g_io_deliver(bp, ENOMEM);
486 bp2->bio_done = g_std_done;
489 cp = LIST_FIRST(&gp->consumer);
490 g_io_request(bp2, cp);
/* The image is read-only; no other command is supported. */
494 if (bp->bio_cmd != BIO_READ) {
495 g_io_deliver(bp, EOPNOTSUPP);
/* Initialize progress accounting before entering the state machine. */
499 bp->bio_resid = bp->bio_length;
500 bp->bio_completed = 0;
502 g_uzip_request(gp, bp);
/*
 * Consumer orphan method: the underlying provider went away, so free the
 * softc and wither the whole geom.  NOTE(review): the derivation of `gp'
 * from `cp' is in lines missing from this extract.
 */
506 g_uzip_orphan(struct g_consumer *cp)
510 g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, cp->provider->name);
514 g_uzip_softc_free(gp->softc, gp);
516 g_wither_geom(gp, ENXIO);
520 g_uzip_access(struct g_provider *pp, int dr, int dw, int de)
523 struct g_consumer *cp;
526 cp = LIST_FIRST(&gp->consumer);
527 KASSERT (cp != NULL, ("g_uzip_access but no consumer"));
529 if (cp->acw + dw > 0)
532 return (g_access(cp, dr, dw, de));
/*
 * Consumer spoiled method: identical cleanup to g_uzip_orphan() -- free
 * the softc and wither the geom.  NOTE(review): the derivation of `gp'
 * from `cp' is in lines missing from this extract.
 */
536 g_uzip_spoiled(struct g_consumer *cp)
540 G_VALID_CONSUMER(cp);
542 g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, gp->name);
545 g_uzip_softc_free(gp->softc, gp);
547 g_wither_geom(gp, ENXIO);
/*
 * Validate and complete the table of contents.  First pass: bounds-check
 * each cluster offset, derive compressed lengths from the next higher
 * offset, and resolve deduplicated ("backref'ed") clusters by matching an
 * earlier entry with the same offset.  Second pass: clamp lengths that
 * exceed the decompressor's max_blen when they are explained by trailing
 * padding on the last cluster.  Returns non-zero on an invalid TOC; the
 * returns and the error label head are not visible in this extract.
 */
551 g_uzip_parse_toc(struct g_uzip_softc *sc, struct g_provider *pp,
554 uint32_t i, j, backref_to;
555 uint64_t max_offset, min_offset;
556 struct g_uzip_blk *last_blk;
/* No cluster may start inside the header + offset table. */
558 min_offset = sizeof(struct cloop_header) +
559 (sc->nblocks + 1) * sizeof(uint64_t);
560 max_offset = sc->toc[0].offset - 1;
561 last_blk = &sc->toc[0];
562 for (i = 0; i < sc->nblocks; i++) {
563 /* First do some bounds checking */
564 if ((sc->toc[i].offset < min_offset) ||
565 (sc->toc[i].offset > pp->mediasize)) {
568 DPRINTF_BLK(GUZ_DBG_IO, i, ("%s: cluster #%u "
569 "offset=%ju max_offset=%ju\n", gp->name,
570 (u_int)i, (uintmax_t)sc->toc[i].offset,
571 (uintmax_t)max_offset));
572 backref_to = BLEN_UNDEF;
/* Offsets below the running high-water mark are back-references. */
573 if (sc->toc[i].offset < max_offset) {
575 * For the backref'ed blocks search already parsed
576 * TOC entries for the matching offset and copy the
577 * size from matched entry.
579 for (j = 0; j <= i; j++) {
580 if (sc->toc[j].offset == sc->toc[i].offset &&
581 !BLK_IS_NIL(sc, j)) {
587 DPRINTF(GUZ_DBG_ERR, ("%s: cannot match "
588 "backref'ed offset at cluster #%u\n",
592 sc->toc[i].blen = sc->toc[j].blen;
595 last_blk = &sc->toc[i];
597 * For the "normal blocks" seek forward until we hit
598 * block whose offset is larger than ours and assume
599 * it's going to be the next one.
601 for (j = i + 1; j < sc->nblocks + 1; j++) {
602 if (sc->toc[j].offset > max_offset) {
606 sc->toc[i].blen = sc->toc[j].offset -
608 if (BLK_ENDS(sc, i) > pp->mediasize) {
609 DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u "
610 "extends past media boundary (%ju > %ju)\n",
612 (uintmax_t)BLK_ENDS(sc, i),
613 (intmax_t)pp->mediasize));
616 KASSERT(max_offset <= sc->toc[i].offset, (
617 "%s: max_offset is incorrect: %ju",
618 gp->name, (uintmax_t)max_offset));
619 max_offset = BLK_ENDS(sc, i) - 1;
621 DPRINTF_BLK(GUZ_DBG_TOC, i, ("%s: cluster #%u, original %u "
622 "bytes, in %u bytes", gp->name, i, sc->blksz,
624 if (backref_to != BLEN_UNDEF) {
625 DPRINTF_BLK(GUZ_DBG_TOC, i, (" (->#%u)",
628 DPRINTF_BLK(GUZ_DBG_TOC, i, ("\n"));
631 /* Do a second pass to validate block lengths */
632 for (i = 0; i < sc->nblocks; i++) {
633 if (sc->toc[i].blen > sc->dcp->max_blen) {
/* Oversized non-last clusters are a hard TOC error. */
634 if (sc->toc[i].last == 0) {
635 DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u "
636 "length (%ju) exceeds "
637 "max_blen (%ju)\n", gp->name, i,
638 (uintmax_t)sc->toc[i].blen,
639 (uintmax_t)sc->dcp->max_blen));
642 DPRINTF(GUZ_DBG_INFO, ("%s: cluster #%u extra "
643 "padding is detected, trimmed to %ju\n",
644 gp->name, i, (uintmax_t)sc->dcp->max_blen));
645 sc->toc[i].blen = sc->dcp->max_blen;
646 sc->toc[i].padded = 1;
/* Shared error report for invalid offsets found in the first pass. */
652 DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u: invalid offset %ju, "
653 "min_offset=%ju mediasize=%jd\n", gp->name, (u_int)i,
654 sc->toc[i].offset, min_offset, pp->mediasize));
/*
 * GEOM taste method: probe provider `pp' for a cloop/uzip image, read and
 * validate its header and offset table, construct the softc (TOC,
 * decompressor, cache, worker thread) and publish the decompressed view
 * as a new 512-byte-sector provider.  NOTE(review): many lines (returns,
 * braces, error labels, the detach path) are missing from this extract.
 */
658 static struct g_geom *
659 g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
662 uint32_t i, total_offsets, offsets_read, blk;
664 struct cloop_header *header;
665 struct g_consumer *cp;
667 struct g_provider *pp2;
668 struct g_uzip_softc *sc;
676 g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
679 /* Skip providers that are already open for writing. */
/* Honor the attach_to / noattach_to pattern tunables. */
683 if ((fnmatch(g_uzip_attach_to, pp->name, 0) != 0) ||
684 (fnmatch(g_uzip_noattach_to, pp->name, 0) == 0)) {
685 DPRINTF(GUZ_DBG_INFO, ("%s(%s,%s), ignoring\n", __func__,
686 mp->name, pp->name));
693 * Create geom instance.
695 gp = g_new_geomf(mp, GUZ_DEV_NAME("%s"), pp->name);
696 cp = g_new_consumer(gp);
697 error = g_attach(cp, pp);
699 error = g_access(cp, 1, 0, 0);
706 * Read cloop header, look for CLOOP magic, perform
707 * other validity checks.
709 DPRINTF(GUZ_DBG_INFO, ("%s: media sectorsize %u, mediasize %jd\n",
710 gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
711 buf = g_read_data(cp, 0, pp->sectorsize, NULL);
714 header = (struct cloop_header *) buf;
715 if (strncmp(header->magic, CLOOP_MAGIC_START,
716 sizeof(CLOOP_MAGIC_START) - 1) != 0) {
717 DPRINTF(GUZ_DBG_ERR, ("%s: no CLOOP magic\n", gp->name));
/* Version and compression format live in fixed bytes of the magic. */
721 cloop_version = header->magic[CLOOP_OFS_VERSN];
722 switch (header->magic[CLOOP_OFS_COMPR]) {
723 case CLOOP_COMP_LZMA:
724 case CLOOP_COMP_LZMA_DDP:
726 if (cloop_version < CLOOP_MINVER_LZMA) {
727 DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
731 DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_LZMA image found\n",
734 case CLOOP_COMP_LIBZ:
735 case CLOOP_COMP_LIBZ_DDP:
737 if (cloop_version < CLOOP_MINVER_ZLIB) {
738 DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
742 DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZLIB image found\n",
745 case CLOOP_COMP_ZSTD:
746 case CLOOP_COMP_ZSTD_DDP:
747 if (cloop_version < CLOOP_MINVER_ZSTD) {
748 DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
/* Zstd support is conditional on the ZSTDIO kernel option. */
753 DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZSTD image found.\n",
757 DPRINTF(GUZ_DBG_ERR, ("%s: GEOM_UZIP_ZSTD image found, but "
758 "this kernel was configured with Zstd disabled.\n",
764 DPRINTF(GUZ_DBG_ERR, ("%s: unsupported image type\n",
770 * Initialize softc and read offsets.
772 sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
774 sc->blksz = ntohl(header->blksz);
775 sc->nblocks = ntohl(header->nblocks);
776 if (sc->blksz % 512 != 0) {
777 printf("%s: block size (%u) should be multiple of 512.\n",
778 gp->name, sc->blksz);
781 if (sc->blksz > MAX_BLKSZ) {
782 printf("%s: block size (%u) should not be larger than %d.\n",
783 gp->name, sc->blksz, MAX_BLKSZ);
/*
 * NOTE(review): total_offsets comes from on-media data; the products
 * total_offsets * sizeof(uint64_t) below are not checked for overflow
 * before use -- worth verifying for hostile images.
 */
785 total_offsets = sc->nblocks + 1;
786 if (sizeof(struct cloop_header) +
787 total_offsets * sizeof(uint64_t) > pp->mediasize) {
788 printf("%s: media too small for %u blocks\n",
789 gp->name, sc->nblocks);
792 sc->toc = malloc(total_offsets * sizeof(struct g_uzip_blk),
793 M_GEOM_UZIP, M_WAITOK | M_ZERO);
/* Offsets are stored big-endian right after the header. */
794 offsets_read = MIN(total_offsets,
795 (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
796 for (i = 0; i < offsets_read; i++) {
797 sc->toc[i].offset = be64toh(((uint64_t *) (header + 1))[i]);
798 sc->toc[i].blen = BLEN_UNDEF;
800 DPRINTF(GUZ_DBG_INFO, ("%s: %u offsets in the first sector\n",
801 gp->name, offsets_read));
804 * The following invalidates the "header" pointer into the first
/* Read the remainder of the offset table sector by sector. */
809 for (blk = 1; offsets_read < total_offsets; blk++) {
814 cp, blk * pp->sectorsize, pp->sectorsize, NULL);
817 nread = MIN(total_offsets - offsets_read,
818 pp->sectorsize / sizeof(uint64_t));
819 DPRINTF(GUZ_DBG_TOC, ("%s: %u offsets read from sector %d\n",
820 gp->name, nread, blk));
821 for (i = 0; i < nread; i++) {
822 sc->toc[offsets_read + i].offset =
823 be64toh(((uint64_t *) buf)[i]);
824 sc->toc[offsets_read + i].blen = BLEN_UNDEF;
826 offsets_read += nread;
831 DPRINTF(GUZ_DBG_INFO, ("%s: done reading %u block offsets from %u "
832 "sectors\n", gp->name, offsets_read, blk));
833 if (sc->nblocks != offsets_read) {
834 DPRINTF(GUZ_DBG_ERR, ("%s: read %s offsets than expected "
835 "blocks\n", gp->name,
836 sc->nblocks < offsets_read ? "more" : "less"));
/* Instantiate the decompressor matching the detected format. */
842 sc->dcp = g_uzip_zlib_ctor(sc->blksz);
845 sc->dcp = g_uzip_lzma_ctor(sc->blksz);
849 sc->dcp = g_uzip_zstd_ctor(sc->blksz);
857 * The last+1 block was not always initialized by earlier versions of
858 * mkuzip(8). However, *if* it is initialized, the difference between
859 * its offset and the prior block's offset represents the length of the
860 * final real compressed block, and this is significant to the
863 if (cloop_version >= CLOOP_MINVER_RELIABLE_LASTBLKSZ &&
864 sc->toc[sc->nblocks].offset != 0) {
865 if (sc->toc[sc->nblocks].offset > pp->mediasize) {
867 ("%s: bogus n+1 offset %ju > mediasize %ju\n",
868 gp->name, (uintmax_t)sc->toc[sc->nblocks].offset,
869 (uintmax_t)pp->mediasize));
873 sc->toc[sc->nblocks].offset = pp->mediasize;
875 /* Massage TOC (table of contents), make sure it is sound */
876 if (g_uzip_parse_toc(sc, pp, gp) != 0) {
877 DPRINTF(GUZ_DBG_ERR, ("%s: TOC error\n", gp->name));
880 mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
881 mtx_init(&sc->queue_mtx, "geom_uzip wrkthread", NULL, MTX_DEF);
882 bioq_init(&sc->bio_queue);
884 sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
888 sc->uzip_do = &g_uzip_do;
890 error = kproc_create(g_uzip_wrkthr, sc, &sc->procp, 0, 0, "%s",
/* Publish the uncompressed view as a 512-byte-sector provider. */
897 pp2 = g_new_providerf(gp, "%s", gp->name);
898 pp2->sectorsize = 512;
899 pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
900 pp2->stripesize = pp->stripesize;
901 pp2->stripeoffset = pp->stripeoffset;
902 g_error_provider(pp2, 0);
903 g_access(cp, -1, 0, 0);
905 DPRINTF(GUZ_DBG_INFO, ("%s: taste ok (%d, %ju), (%ju, %ju), %x\n",
906 gp->name, pp2->sectorsize, (uintmax_t)pp2->mediasize,
907 (uintmax_t)pp2->stripeoffset, (uintmax_t)pp2->stripesize, pp2->flags));
908 DPRINTF(GUZ_DBG_INFO, ("%s: %u x %u blocks\n", gp->name, sc->nblocks,
/*
 * Error path.  NOTE(review): last_buf and toc were allocated with the
 * M_GEOM_UZIP malloc type (see above) but are freed here with M_GEOM --
 * a malloc-type mismatch that trips accounting/INVARIANTS; verify and
 * fix against upstream.
 */
913 free(sc->last_buf, M_GEOM);
914 mtx_destroy(&sc->queue_mtx);
915 mtx_destroy(&sc->last_mtx);
917 sc->dcp->free(sc->dcp);
919 free(sc->toc, M_GEOM);
921 free(gp->softc, M_GEOM_UZIP);
928 g_access(cp, -1, 0, 0);
931 g_destroy_consumer(cp);
/*
 * gctl destroy method: refuse when the provider is still open (any of
 * acr/acw/ace positive), otherwise free the softc and wither the geom.
 * The returns and some braces are not visible in this extract.
 */
938 g_uzip_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
940 struct g_provider *pp;
942 KASSERT(gp != NULL, ("NULL geom"));
943 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, gp->name);
946 if (gp->softc == NULL) {
947 DPRINTF(GUZ_DBG_ERR, ("%s(%s): gp->softc == NULL\n", __func__,
952 pp = LIST_FIRST(&gp->provider);
953 KASSERT(pp != NULL, ("NULL provider"));
/* Busy provider: somebody still has it open. */
954 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
957 g_uzip_softc_free(gp->softc, gp);
959 g_wither_geom(gp, ENXIO);
/* GEOM class glue: method table, class registration and module metadata. */
964 static struct g_class g_uzip_class = {
965 .name = UZIP_CLASS_NAME,
966 .version = G_VERSION,
967 .taste = g_uzip_taste,
968 .destroy_geom = g_uzip_destroy_geom,
970 .start = g_uzip_start,
971 .orphan = g_uzip_orphan,
972 .access = g_uzip_access,
973 .spoiled = g_uzip_spoiled,
976 DECLARE_GEOM_CLASS(g_uzip_class, g_uzip);
/* Depends on the in-kernel xz (LZMA) and zlib decompressors. */
977 MODULE_DEPEND(g_uzip, xz, 1, 1, 1);
978 MODULE_DEPEND(g_uzip, zlib, 1, 1, 1);
979 MODULE_VERSION(geom_uzip, 0);