2 * Copyright (c) 2004 Max Khon
3 * Copyright (c) 2014 Juniper Networks, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
33 #include <sys/endian.h>
34 #include <sys/errno.h>
35 #include <sys/kernel.h>
37 #include <sys/mutex.h>
38 #include <sys/malloc.h>
39 #include <sys/sysctl.h>
40 #include <sys/systm.h>
42 #include <geom/geom.h>
45 FEATURE(geom_uzip, "GEOM uzip read-only compressed disks support");
/* Debug printing is compiled out unless GEOM_UZIP_DEBUG is defined. */
47 #undef GEOM_UZIP_DEBUG
48 #ifdef GEOM_UZIP_DEBUG
49 #define DPRINTF(a) printf a
/* Dedicated malloc type so uzip allocations are attributable in vmstat -m. */
54 static MALLOC_DEFINE(M_GEOM_UZIP, "geom_uzip", "GEOM UZIP data structures");
56 #define UZIP_CLASS_NAME "UZIP"
59 * Maximum allowed valid block size (to prevent foot-shooting)
/*
 * Derived from MAXPHYS so a compressed block (plus zlib worst-case
 * expansion overhead) still fits in a single physical I/O.
 */
61 #define MAX_BLKSZ (MAXPHYS - MAXPHYS / 1000 - 12)
64 * Integer values (block size, number of blocks, offsets)
65 * are stored in big-endian (network) order on disk and struct cloop_header
66 * and in native order in struct g_uzip_softc
69 #define CLOOP_MAGIC_LEN 128
/* cloop images start with a shell script stub; this prefix identifies them. */
70 static char CLOOP_MAGIC_START[] = "#!/bin/sh\n";
/* Fields below belong to struct cloop_header (on-disk layout, big-endian).
 * NOTE(review): the struct's opening/closing lines are not visible in this
 * chunk of the file. */
73 char magic[CLOOP_MAGIC_LEN]; /* cloop magic */
74 uint32_t blksz; /* block size */
75 uint32_t nblocks; /* number of blocks */
/* Fields below belong to struct g_uzip_softc (in-core, host byte order).
 * last_blk/last_buf implement a one-block decompression cache protected by
 * last_mtx (declaration not visible here); req_* are cache statistics. */
79 uint32_t blksz; /* block size */
80 uint32_t nblocks; /* number of blocks */
84 uint32_t last_blk; /* last blk no */
85 char *last_buf; /* last blk data */
86 int req_total; /* total requests */
87 int req_cached; /* cached requests */
/* Forward declaration: completion handler for cloned bios (defined below). */
90 static void g_uzip_done(struct bio *bp);
/*
 * Tear down a softc: report cache statistics (presumably only when gp is
 * non-NULL — the guard line is not visible in this chunk), then release the
 * offsets table, the cache mutex, the cached-block buffer, and the softc
 * itself.  Caller must ensure no I/O is in flight.
 */
93 g_uzip_softc_free(struct g_uzip_softc *sc, struct g_geom *gp)
97 printf("%s: %d requests, %d cached\n",
98 gp->name, sc->req_total, sc->req_cached);
100 if (sc->offsets != NULL) {
101 free(sc->offsets, M_GEOM_UZIP);
104 mtx_destroy(&sc->last_mtx);
105 free(sc->last_buf, M_GEOM_UZIP);
106 free(sc, M_GEOM_UZIP);
/*
 * zlib zalloc callback: allocate 'type' items of 'size' bytes each.
 * M_NOWAIT means this may return NULL; zlib handles that as Z_MEM_ERROR.
 * NOTE(review): 'type * size' is an unchecked u_int multiply; with zlib's
 * small, fixed allocation requests it should not overflow — confirm.
 */
110 z_alloc(void *nil, u_int type, u_int size)
114 ptr = malloc(type * size, M_GEOM_UZIP, M_NOWAIT);
/* zlib zfree callback: release memory obtained via z_alloc(). */
120 z_free(void *nil, void *ptr)
123 free(ptr, M_GEOM_UZIP);
/*
 * Try to satisfy (part of) a read from the one-block decompression cache.
 * If the block at the request's current offset matches sc->last_blk, copy
 * up to one block's worth of data out of sc->last_buf under last_mtx and
 * advance bio_completed/bio_resid accordingly.
 * NOTE(review): the return statements and the full-request completion path
 * (the bio_resid == 0 branch) are not visible in this chunk; callers treat
 * a non-zero return as "request fully handled" (see g_uzip_request).
 */
127 g_uzip_cached(struct g_geom *gp, struct bio *bp)
129 struct g_uzip_softc *sc;
131 size_t blk, blkofs, usz;
/* Resume point of a partially-completed request, in uncompressed bytes. */
134 ofs = bp->bio_offset + bp->bio_completed;
135 blk = ofs / sc->blksz;
136 mtx_lock(&sc->last_mtx);
137 if (blk == sc->last_blk) {
/* Cache hit: copy from the offset within the cached block, clamped
 * to the bytes still wanted by the request. */
138 blkofs = ofs % sc->blksz;
139 usz = sc->blksz - blkofs;
140 if (bp->bio_resid < usz)
142 memcpy(bp->bio_data + bp->bio_completed, sc->last_buf + blkofs,
/* Drop the lock before touching the bio; the copy is done. */
145 mtx_unlock(&sc->last_mtx);
147 DPRINTF(("%s/%s: %p: offset=%jd: got %jd bytes from cache\n",
148 __func__, gp->name, bp, (intmax_t)ofs, (intmax_t)usz));
150 bp->bio_completed += usz;
151 bp->bio_resid -= usz;
153 if (bp->bio_resid == 0) {
/* Cache miss path: just release the lock. */
158 mtx_unlock(&sc->last_mtx);
/*
 * Issue the backing-store read for the next chunk of an uncompressed read
 * request.  First tries the one-block cache; otherwise clones the bio,
 * computes the sector-aligned compressed byte range covering the blocks
 * [start_blk, end_blk) via the offsets table, allocates a bounce buffer,
 * and sends the clone down to the consumer.  g_uzip_done() decompresses.
 * NOTE(review): several lines (NULL-clone check body, the MAXPHYS clamp
 * branch, returns) are missing from this chunk of the file.
 */
164 g_uzip_request(struct g_geom *gp, struct bio *bp)
166 struct g_uzip_softc *sc;
168 struct g_consumer *cp;
169 struct g_provider *pp;
171 size_t start_blk, end_blk;
/* Fully satisfied from the cache — nothing more to do here. */
173 if (g_uzip_cached(gp, bp) != 0)
178 bp2 = g_clone_bio(bp);
/* Clone allocation failed: fail the original request with ENOMEM. */
180 g_io_deliver(bp, ENOMEM);
183 bp2->bio_done = g_uzip_done;
185 cp = LIST_FIRST(&gp->consumer);
/* Translate the uncompressed resume offset into a compressed block range. */
188 ofs = bp->bio_offset + bp->bio_completed;
189 start_blk = ofs / sc->blksz;
190 KASSERT(start_blk < sc->nblocks, ("start_blk out of range"));
191 end_blk = (ofs + bp->bio_resid + sc->blksz - 1) / sc->blksz;
192 KASSERT(end_blk <= sc->nblocks, ("end_blk out of range"));
194 DPRINTF(("%s/%s: %p: start=%u (%jd), end=%u (%jd)\n",
195 __func__, gp->name, bp,
196 (u_int)start_blk, (intmax_t)sc->offsets[start_blk],
197 (u_int)end_blk, (intmax_t)sc->offsets[end_blk]));
/* Round the compressed range down/up to provider sector boundaries;
 * g_uzip_done() compensates with sc->offsets[blk] % pp->sectorsize. */
199 bp2->bio_offset = sc->offsets[start_blk] -
200 sc->offsets[start_blk] % pp->sectorsize;
202 bp2->bio_length = sc->offsets[end_blk] - bp2->bio_offset;
203 bp2->bio_length = (bp2->bio_length + pp->sectorsize - 1) /
204 pp->sectorsize * pp->sectorsize;
/* NOTE(review): the branch bodies clamping bio_length to MAXPHYS are
 * not visible here — a single transfer must not exceed MAXPHYS. */
205 if (bp2->bio_length <= MAXPHYS)
/* Bounce buffer for the raw compressed data read from the consumer. */
211 bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT);
212 if (bp2->bio_data == NULL) {
214 g_io_deliver(bp, ENOMEM);
218 DPRINTF(("%s/%s: %p: reading %jd bytes from offset %jd\n",
219 __func__, gp->name, bp,
220 (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));
222 g_io_request(bp2, cp);
/*
 * Completion handler for the cloned bio issued by g_uzip_request().
 * Inflates each compressed block from the bounce buffer (bp->bio_data)
 * into the per-softc cache buffer under last_mtx, copies the wanted
 * slice into the parent bio's data, and either delivers the parent
 * (done or errored) or re-issues g_uzip_request() for the remainder.
 * Any zlib failure is mapped to EILSEQ.
 * NOTE(review): numerous lines are missing from this chunk (zs setup,
 * the all-zero-block branch body, loop advancement of data/blk/blkofs,
 * gotos/labels), so the exact control flow here cannot be fully
 * reconstructed from what is visible.
 */
227 g_uzip_done(struct bio *bp)
231 struct g_provider *pp;
232 struct g_consumer *cp;
234 struct g_uzip_softc *sc;
237 size_t blk, blkofs, len, ulen;
/* Recover the original request and our geom from the clone. */
239 bp2 = bp->bio_parent;
240 gp = bp2->bio_to->geom;
243 cp = LIST_FIRST(&gp->consumer);
/* Propagate a backing-store error straight to the parent. */
246 bp2->bio_error = bp->bio_error;
247 if (bp2->bio_error != 0)
250 /* Make sure there's forward progress. */
251 if (bp->bio_completed == 0) {
252 bp2->bio_error = ECANCELED;
258 if (inflateInit(&zs) != Z_OK) {
259 bp2->bio_error = EILSEQ;
/* Position within the uncompressed stream where this chunk resumes. */
263 ofs = bp2->bio_offset + bp2->bio_completed;
264 blk = ofs / sc->blksz;
265 blkofs = ofs % sc->blksz;
/* Skip the sector-alignment padding added in g_uzip_request(). */
266 data = bp->bio_data + sc->offsets[blk] % pp->sectorsize;
267 data2 = bp2->bio_data + bp2->bio_completed;
/* Decompress block by block while raw input and output demand remain. */
268 while (bp->bio_completed && bp2->bio_resid) {
269 ulen = MIN(sc->blksz - blkofs, bp2->bio_resid);
/* Compressed length of this block from the offsets table. */
270 len = sc->offsets[blk + 1] - sc->offsets[blk];
271 DPRINTF(("%s/%s: %p/%ju: data2=%p, ulen=%u, data=%p, len=%u\n",
272 __func__, gp->name, gp, bp->bio_completed,
273 data2, (u_int)ulen, data, (u_int)len));
275 /* All zero block: no cache update */
/* Only decompress if the whole compressed block was read in. */
277 } else if (len <= bp->bio_completed) {
/* Inflate into the cache buffer, under the cache lock. */
280 zs.next_out = sc->last_buf;
281 zs.avail_out = sc->blksz;
282 mtx_lock(&sc->last_mtx);
283 if (inflate(&zs, Z_FINISH) != Z_STREAM_END) {
285 mtx_unlock(&sc->last_mtx);
287 bp2->bio_error = EILSEQ;
/* Copy the requested slice of the decompressed block out. */
291 memcpy(data2, sc->last_buf + blkofs, ulen);
292 mtx_unlock(&sc->last_mtx);
/* Reset the stream so it can inflate the next block. */
293 if (inflateReset(&zs) != Z_OK) {
295 bp2->bio_error = EILSEQ;
/* Account progress on both the parent and the clone. */
303 bp2->bio_completed += ulen;
304 bp2->bio_resid -= ulen;
305 bp->bio_completed -= len;
310 if (inflateEnd(&zs) != Z_OK)
311 bp2->bio_error = EILSEQ;
314 /* Finish processing the request. */
315 free(bp->bio_data, M_GEOM_UZIP);
/* Deliver if done or failed; otherwise issue the next chunk. */
317 if (bp2->bio_error != 0 || bp2->bio_resid == 0)
318 g_io_deliver(bp2, bp2->bio_error);
320 g_uzip_request(gp, bp2);
/*
 * GEOM start method: entry point for bios aimed at the uzip provider.
 * Rejects everything except BIO_READ (the device is read-only), then
 * initializes the request's progress counters and hands it to
 * g_uzip_request() to begin the read/decompress cycle.
 */
324 g_uzip_start(struct bio *bp)
326 struct g_provider *pp;
328 struct g_uzip_softc *sc;
333 DPRINTF(("%s/%s: %p: cmd=%d, offset=%jd, length=%jd, buffer=%p\n",
334 __func__, gp->name, bp, bp->bio_cmd, (intmax_t)bp->bio_offset,
335 (intmax_t)bp->bio_length, bp->bio_data));
/* uzip volumes are read-only: refuse writes, deletes, etc. */
340 if (bp->bio_cmd != BIO_READ) {
341 g_io_deliver(bp, EOPNOTSUPP);
/* Nothing done yet; the whole length remains outstanding. */
345 bp->bio_resid = bp->bio_length;
346 bp->bio_completed = 0;
348 g_uzip_request(gp, bp);
/*
 * GEOM orphan method: the underlying provider went away.  Free our
 * softc and wither the geom so it is dismantled cleanly.
 */
352 g_uzip_orphan(struct g_consumer *cp)
356 g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, cp->provider->name);
360 g_uzip_softc_free(gp->softc, gp);
362 g_wither_geom(gp, ENXIO);
/*
 * GEOM access method: pass access-count changes through to our consumer,
 * but never allow a positive write count — the device is read-only.
 * NOTE(review): the line returning the error for the write case is not
 * visible in this chunk.
 */
366 g_uzip_access(struct g_provider *pp, int dr, int dw, int de)
369 struct g_consumer *cp;
372 cp = LIST_FIRST(&gp->consumer);
373 KASSERT (cp != NULL, ("g_uzip_access but no consumer"));
/* Reject any request that would open the provider for writing. */
375 if (cp->acw + dw > 0)
378 return (g_access(cp, dr, dw, de));
/*
 * GEOM spoiled method: the backing provider was opened for writing by
 * someone else, so our cached metadata can no longer be trusted.
 * Tear down exactly as in g_uzip_orphan().
 */
382 g_uzip_spoiled(struct g_consumer *cp)
387 g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, gp->name);
390 g_uzip_softc_free(gp->softc, gp);
392 g_wither_geom(gp, ENXIO);
/*
 * GEOM taste method: probe a provider for a cloop/uzip image.  Reads the
 * first sector, validates the cloop magic and version, builds the softc
 * (block size, block count, per-block offset table read sector by sector),
 * then publishes a new 512-byte-sector provider exposing the uncompressed
 * data.  Returns the new geom on success; error paths free everything.
 * NOTE(review): many lines (declarations of gp/buf/error/nread, early
 * bail-outs, goto labels, the final return) are missing from this chunk.
 */
395 static struct g_geom *
396 g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
399 uint32_t i, total_offsets, offsets_read, blk;
401 struct cloop_header *header;
402 struct g_consumer *cp;
404 struct g_provider *pp2;
405 struct g_uzip_softc *sc;
407 g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
410 /* Skip providers that are already open for writing. */
417 * Create geom instance.
419 gp = g_new_geomf(mp, "%s.uzip", pp->name);
420 cp = g_new_consumer(gp);
421 error = g_attach(cp, pp);
/* Need read access to taste the media. */
423 error = g_access(cp, 1, 0, 0);
426 g_destroy_consumer(cp);
433 * Read cloop header, look for CLOOP magic, perform
434 * other validity checks.
436 DPRINTF(("%s: media sectorsize %u, mediasize %jd\n",
437 gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
438 buf = g_read_data(cp, 0, pp->sectorsize, NULL);
441 header = (struct cloop_header *) buf;
/* The magic is the "#!/bin/sh\n" shell-stub prefix of cloop images. */
442 if (strncmp(header->magic, CLOOP_MAGIC_START,
443 sizeof(CLOOP_MAGIC_START) - 1) != 0) {
444 DPRINTF(("%s: no CLOOP magic\n", gp->name));
/* Format version is encoded in the magic bytes; require V2 or newer. */
447 if (header->magic[0x0b] != 'V' || header->magic[0x0c] < '2') {
448 DPRINTF(("%s: image version too old\n", gp->name));
453 * Initialize softc and read offsets.
455 sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
/* On-disk values are big-endian; convert to host order once, here. */
457 sc->blksz = ntohl(header->blksz);
458 sc->nblocks = ntohl(header->nblocks);
459 if (sc->blksz % 512 != 0) {
460 printf("%s: block size (%u) should be multiple of 512.\n",
461 gp->name, sc->blksz);
464 if (sc->blksz > MAX_BLKSZ) {
465 printf("%s: block size (%u) should not be larger than %d.\n",
466 gp->name, sc->blksz, MAX_BLKSZ);
/* nblocks + 1 offsets: the extra entry bounds the last block's length. */
468 total_offsets = sc->nblocks + 1;
469 if (sizeof(struct cloop_header) +
470 total_offsets * sizeof(uint64_t) > pp->mediasize) {
471 printf("%s: media too small for %u blocks\n",
472 gp->name, sc->nblocks);
475 sc->offsets = malloc(
476 total_offsets * sizeof(uint64_t), M_GEOM_UZIP, M_WAITOK);
/* The first sector holds the header plus as many offsets as fit. */
477 offsets_read = MIN(total_offsets,
478 (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
479 for (i = 0; i < offsets_read; i++)
480 sc->offsets[i] = be64toh(((uint64_t *) (header + 1))[i]);
481 DPRINTF(("%s: %u offsets in the first sector\n",
482 gp->name, offsets_read));
/* Read the remaining offsets one sector at a time. */
483 for (blk = 1; offsets_read < total_offsets; blk++) {
488 cp, blk * pp->sectorsize, pp->sectorsize, NULL);
491 nread = MIN(total_offsets - offsets_read,
492 pp->sectorsize / sizeof(uint64_t));
493 DPRINTF(("%s: %u offsets read from sector %d\n",
494 gp->name, nread, blk));
495 for (i = 0; i < nread; i++) {
496 sc->offsets[offsets_read + i] =
497 be64toh(((uint64_t *) buf)[i]);
499 offsets_read += nread;
502 DPRINTF(("%s: done reading offsets\n", gp->name));
/* Set up the one-block decompression cache (see g_uzip_cached). */
503 mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
505 sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
/* Publish the uncompressed view as a new 512-byte-sector provider. */
510 pp2 = g_new_providerf(gp, "%s", gp->name);
511 pp2->sectorsize = 512;
512 pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
513 pp2->stripesize = pp->stripesize;
514 pp2->stripeoffset = pp->stripeoffset;
515 g_error_provider(pp2, 0);
/* Drop the read access taken for tasting; I/O reopens as needed. */
516 g_access(cp, -1, 0, 0);
518 DPRINTF(("%s: taste ok (%d, %jd), (%d, %d), %x\n",
520 pp2->sectorsize, (intmax_t)pp2->mediasize,
521 pp2->stripeoffset, pp2->stripesize, pp2->flags));
522 printf("%s: %u x %u blocks\n", gp->name, sc->nblocks, sc->blksz);
/* Error cleanup path: release access, free softc, destroy consumer. */
527 g_access(cp, -1, 0, 0);
530 if (gp->softc != NULL) {
531 g_uzip_softc_free(gp->softc, NULL);
535 g_destroy_consumer(cp);
/*
 * GEOM destroy_geom method (e.g. "gnop destroy"-style ctl requests):
 * refuse if the provider is still open (any read/write/exclusive count),
 * otherwise free the softc and wither the geom.
 * NOTE(review): the return statements for each branch are not visible in
 * this chunk of the file.
 */
542 g_uzip_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
544 struct g_provider *pp;
546 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, gp->name);
549 if (gp->softc == NULL) {
550 printf("%s(%s): gp->softc == NULL\n", __func__, gp->name);
554 KASSERT(gp != NULL, ("NULL geom"));
555 pp = LIST_FIRST(&gp->provider);
556 KASSERT(pp != NULL, ("NULL provider"));
/* Busy provider: someone still has it open; refuse destruction. */
557 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
560 g_uzip_softc_free(gp->softc, gp);
562 g_wither_geom(gp, ENXIO);
/*
 * GEOM class descriptor wiring the methods above into the GEOM framework,
 * plus the module declaration and its dependency on the kernel zlib.
 */
567 static struct g_class g_uzip_class = {
568 .name = UZIP_CLASS_NAME,
569 .version = G_VERSION,
570 .taste = g_uzip_taste,
571 .destroy_geom = g_uzip_destroy_geom,
573 .start = g_uzip_start,
574 .orphan = g_uzip_orphan,
575 .access = g_uzip_access,
576 .spoiled = g_uzip_spoiled,
579 DECLARE_GEOM_CLASS(g_uzip_class, g_uzip);
/* Requires the in-kernel zlib for inflate()/inflateInit() used above. */
580 MODULE_DEPEND(g_uzip, zlib, 1, 1, 1);