2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * Copyright (c) 2009-2010 The FreeBSD Foundation
6 * Portions of this software were developed by Pawel Jakub Dawidek
7 * under sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/fcntl.h>
41 #include <sys/linker.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
46 #include <sys/limits.h>
47 #include <sys/queue.h>
48 #include <sys/sysctl.h>
49 #include <sys/signalvar.h>
51 #include <machine/atomic.h>
53 #include <geom/geom.h>
54 #include <geom/gate/g_gate.h>
/* Malloc tag for all dynamic allocations made by the GEOM Gate class. */
56 static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");
58 SYSCTL_DECL(_kern_geom);
/* kern.geom.gate sysctl subtree. */
59 static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
60 "GEOM_GATE configuration");
/* Debug verbosity; runtime-writable sysctl, also a loader tunable. */
61 static int g_gate_debug = 0;
62 TUNABLE_INT("kern.geom.gate.debug", &g_gate_debug);
63 SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
/*
 * Size of the g_gate_units[] table.  Read-only tunable (CTLFLAG_RDTUN):
 * it must be fixed before the unit array is allocated at module load.
 */
65 static u_int g_gate_maxunits = 256;
66 TUNABLE_INT("kern.geom.gate.maxunits", &g_gate_maxunits);
67 SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
68 &g_gate_maxunits, 0, "Maximum number of ggate devices");
/*
 * GEOM class descriptor registered via DECLARE_GEOM_CLASS() at the
 * bottom of the file.
 * NOTE(review): the initializer is truncated in this excerpt -- the
 * remaining members (version, method hooks) are not visible here.
 */
70 struct g_class g_gate_class = {
71 .name = G_GATE_CLASS_NAME,
/* Control device (/dev/G_GATE_CTL_NAME) through which userland issues ioctls. */
75 static struct cdev *status_dev;
76 static d_ioctl_t g_gate_ioctl;
77 static struct cdevsw g_gate_cdevsw = {
78 .d_version = D_VERSION,
79 .d_ioctl = g_gate_ioctl,
80 .d_name = G_GATE_CTL_NAME
/*
 * Table of active units (indexed by unit number, g_gate_maxunits slots),
 * the count of live units, and the mutex protecting both.
 */
84 static struct g_gate_softc **g_gate_units;
85 static u_int g_gate_nunits;
86 static struct mtx g_gate_units_lock;
/*
 * Tear down a ggate device: mark it dying, cancel every queued bio with
 * ENXIO, wait for outstanding references to drain, release the unit slot
 * and wither the geom.
 *
 * Entered with g_gate_units_lock held (asserted below); the lock is
 * dropped during queue teardown and re-taken to free the unit slot.
 * With force == 0 the destroy is refused while the provider is open.
 *
 * NOTE(review): several original lines are missing from this excerpt
 * (return type, 'pp'/'gp'/'bp' initialization, returns, some braces);
 * comments describe only the visible code.
 */
89 g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
91 struct g_provider *pp;
92 struct g_consumer *cp;
97 mtx_assert(&g_gate_units_lock, MA_OWNED);
/* Refuse to destroy a provider that is still open, unless forced. */
99 if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
100 mtx_unlock(&g_gate_units_lock);
103 mtx_unlock(&g_gate_units_lock);
/* Flag the softc so new I/O and ioctls are rejected from now on. */
104 mtx_lock(&sc->sc_queue_mtx);
105 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
106 sc->sc_flags |= G_GATE_FLAG_DESTROY;
108 mtx_unlock(&sc->sc_queue_mtx);
/* Detach the provider from new consumers and stop the timeout callout. */
110 pp->flags |= G_PF_WITHER;
111 g_orphan_provider(pp, ENXIO);
112 callout_drain(&sc->sc_callout);
/* Fail every bio still sitting on the incoming and outgoing queues. */
113 mtx_lock(&sc->sc_queue_mtx);
114 while ((bp = bioq_first(&sc->sc_inqueue)) != NULL) {
115 bioq_remove(&sc->sc_inqueue, bp);
116 sc->sc_queue_count--;
117 G_GATE_LOGREQ(1, bp, "Request canceled.");
118 g_io_deliver(bp, ENXIO);
120 while ((bp = bioq_first(&sc->sc_outqueue)) != NULL) {
121 bioq_remove(&sc->sc_outqueue, bp);
122 sc->sc_queue_count--;
123 G_GATE_LOGREQ(1, bp, "Request canceled.");
124 g_io_deliver(bp, ENXIO);
126 mtx_unlock(&sc->sc_queue_mtx);
128 mtx_lock(&g_gate_units_lock);
129 /* One reference is ours. */
/* Sleep until g_gate_release() drops the remaining references. */
131 while (sc->sc_ref > 0)
132 msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
133 g_gate_units[sc->sc_unit] = NULL;
134 KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
136 mtx_unlock(&g_gate_units_lock);
137 mtx_destroy(&sc->sc_queue_mtx);
/* Drop the read-pass-through consumer, if one was configured. */
139 if ((cp = sc->sc_readcons) != NULL) {
140 sc->sc_readcons = NULL;
141 (void)g_access(cp, -1, 0, 0);
143 g_destroy_consumer(cp);
145 G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
147 g_wither_geom(gp, ENXIO);
148 sc->sc_provider = NULL;
/*
 * GEOM access method for gate providers.  Only additional opens
 * (positive deltas) are policed; closes always succeed.  Rejects access
 * to a dying device and enforces the read-only/write-only flags.
 * NOTE(review): the error-return statements are missing from this
 * excerpt; only the conditions are visible.
 */
154 g_gate_access(struct g_provider *pp, int dr, int dw, int de)
156 struct g_gate_softc *sc;
/* Pure close (no new access) is always allowed. */
158 if (dr <= 0 && dw <= 0 && de <= 0)
160 sc = pp->geom->softc;
161 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
163 /* XXX: Hack to allow read-only mounts. */
165 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
168 if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
/*
 * Queue a bio on the incoming queue for the userland daemon to pick up
 * via G_GATE_CMD_START.  Fails the bio with ENXIO if the device is
 * going away and with ENOMEM if the queue limit is exceeded.
 */
174 g_gate_queue_io(struct bio *bp)
176 struct g_gate_softc *sc;
178 sc = bp->bio_to->geom->softc;
179 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
180 g_io_deliver(bp, ENXIO);
184 mtx_lock(&sc->sc_queue_mtx);
/* Enforce the per-device queue limit (0 means unlimited). */
186 if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
187 mtx_unlock(&sc->sc_queue_mtx);
188 G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
189 g_io_deliver(bp, ENOMEM);
/*
 * Tag the bio with the per-device sequence number; userland uses it
 * (gctl_seq) to match START/DONE/CANCEL requests to bios.
 */
193 bp->bio_driver1 = (void *)sc->sc_seq;
195 sc->sc_queue_count++;
197 bioq_insert_tail(&sc->sc_inqueue, bp);
200 mtx_unlock(&sc->sc_queue_mtx);
/*
 * Completion handler for bios cloned onto the read pass-through
 * consumer.  On success the parent bio is completed directly; on
 * failure the parent is re-routed through the userland daemon.
 */
204 g_gate_done(struct bio *cbp)
208 pbp = cbp->bio_parent;
209 if (cbp->bio_error == 0) {
210 pbp->bio_completed = cbp->bio_completed;
213 g_io_deliver(pbp, 0);
215 /* If direct read failed, pass it through userland daemon. */
218 g_gate_queue_io(pbp);
/*
 * GEOM start method: dispatch an incoming bio.  Reads may be satisfied
 * by cloning onto the configured read consumer (bypassing userland);
 * everything else -- and reads without a read provider -- falls through
 * to g_gate_queue_io() for the userland daemon to service.
 * NOTE(review): case labels and some returns are missing from this
 * excerpt; the visible branches are commented as-is.
 */
223 g_gate_start(struct bio *pbp)
225 struct g_gate_softc *sc;
227 sc = pbp->bio_to->geom->softc;
228 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
229 g_io_deliver(pbp, ENXIO);
232 G_GATE_LOGREQ(2, pbp, "Request received.");
233 switch (pbp->bio_cmd) {
/* Read path: clone onto the read consumer when one is configured. */
235 if (sc->sc_readcons != NULL) {
238 cbp = g_clone_bio(pbp);
240 g_io_deliver(pbp, ENOMEM);
243 cbp->bio_done = g_gate_done;
/* Shift the offset into the read provider's address space. */
244 cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
245 cbp->bio_to = sc->sc_readcons->provider;
246 g_io_request(cbp, sc->sc_readcons);
253 /* XXX: Hack to allow read-only mounts. */
254 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
255 g_io_deliver(pbp, EPERM);
/* Unsupported commands are rejected outright. */
261 G_GATE_LOGREQ(2, pbp, "Ignoring request.");
262 g_io_deliver(pbp, EOPNOTSUPP);
/* Hand the request to the userland daemon. */
266 g_gate_queue_io(pbp);
/*
 * Look up a softc by unit number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, under g_gate_units_lock.  Returns the softc (or
 * NULL when not found).
 * NOTE(review): the reference-count bump and the name comparison call
 * are partially missing from this excerpt -- presumably sc_ref is
 * incremented before the lock is dropped; confirm against full source.
 */
269 static struct g_gate_softc *
270 g_gate_hold(int unit, const char *name)
272 struct g_gate_softc *sc = NULL;
274 mtx_lock(&g_gate_units_lock);
275 if (unit >= 0 && unit < g_gate_maxunits)
276 sc = g_gate_units[unit];
277 else if (unit == G_GATE_NAME_GIVEN) {
278 KASSERT(name != NULL, ("name is NULL"));
/* Linear scan of the unit table for a matching provider name. */
279 for (unit = 0; unit < g_gate_maxunits; unit++) {
280 if (g_gate_units[unit] == NULL)
283 g_gate_units[unit]->sc_provider->name) != 0) {
286 sc = g_gate_units[unit];
292 mtx_unlock(&g_gate_units_lock);
/*
 * Drop a reference obtained by g_gate_hold().  When the last reference
 * is dropped on a device flagged for destruction, the destroyer waiting
 * in g_gate_destroy() must be notified.
 * NOTE(review): the sc_ref decrement and the body of the final 'if' are
 * missing from this excerpt -- presumably a wakeup(&sc->sc_ref); confirm
 * against full source.
 */
297 g_gate_release(struct g_gate_softc *sc)
300 g_topology_assert_not();
301 mtx_lock(&g_gate_units_lock);
303 KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
304 if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
306 mtx_unlock(&g_gate_units_lock);
/*
 * Validate an explicitly requested unit number, or (for the auto case)
 * scan for the first free slot in g_gate_units[].  Errors are reported
 * through *errorp.  Caller must hold g_gate_units_lock (asserted).
 * NOTE(review): the error assignments and return statements are missing
 * from this excerpt.
 */
310 g_gate_getunit(int unit, int *errorp)
313 mtx_assert(&g_gate_units_lock, MA_OWNED);
315 if (unit >= g_gate_maxunits)
317 else if (g_gate_units[unit] == NULL)
/* Auto-allocation: first NULL slot wins. */
322 for (unit = 0; unit < g_gate_maxunits; unit++) {
323 if (g_gate_units[unit] == NULL)
/*
 * Periodic callout: fail with EIO any request that has been sitting on
 * the incoming or outgoing queue for 5 seconds or more, then re-arm the
 * callout unless the device is being destroyed.
 * NOTE(review): the curtime initialization (binuptime?) and the
 * matching g_gate_release() are missing from this excerpt.
 */
332 g_gate_guard(void *arg)
334 struct g_gate_softc *sc;
335 struct bintime curtime;
336 struct bio *bp, *bp2;
/* Hold the softc so it cannot be destroyed while we walk its queues. */
340 g_gate_hold(sc->sc_unit, NULL);
341 mtx_lock(&sc->sc_queue_mtx);
342 TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
/* Requests younger than 5 seconds are left alone. */
343 if (curtime.sec - bp->bio_t0.sec < 5)
345 bioq_remove(&sc->sc_inqueue, bp);
346 sc->sc_queue_count--;
347 G_GATE_LOGREQ(1, bp, "Request timeout.");
348 g_io_deliver(bp, EIO);
350 TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
351 if (curtime.sec - bp->bio_t0.sec < 5)
353 bioq_remove(&sc->sc_outqueue, bp);
354 sc->sc_queue_count--;
355 G_GATE_LOGREQ(1, bp, "Request timeout.");
356 g_io_deliver(bp, EIO);
358 mtx_unlock(&sc->sc_queue_mtx);
/* Re-arm ourselves unless the device is on its way out. */
359 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
360 callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
/*
 * GEOM orphan method: the provider backing our read consumer went away.
 * Detach and destroy the consumer; subsequent reads fall back to the
 * userland daemon (sc_readcons == NULL path in g_gate_start()).
 */
367 g_gate_orphan(struct g_consumer *cp)
369 struct g_gate_softc *sc;
377 KASSERT(cp == sc->sc_readcons, ("cp=%p sc_readcons=%p", cp,
379 sc->sc_readcons = NULL;
380 G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
382 (void)g_access(cp, -1, 0, 0);
384 g_destroy_consumer(cp);
/*
 * GEOM dumpconf method: emit the device's configuration as XML into the
 * kern.geom.conf* sysctl output.  Only geom-level dumps are handled
 * (provider/consumer-specific calls return early).
 */
388 g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
389 struct g_consumer *cp, struct g_provider *pp)
391 struct g_gate_softc *sc;
394 if (sc == NULL || pp != NULL || cp != NULL)
/* Hold the softc so it stays valid while we format the output. */
396 sc = g_gate_hold(sc->sc_unit, NULL);
399 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
400 sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
401 } else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
402 sbuf_printf(sb, "%s<access>%s</access>\n", indent,
405 sbuf_printf(sb, "%s<access>%s</access>\n", indent,
/* Read pass-through configuration, when present. */
408 if (sc->sc_readcons != NULL) {
409 sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
410 indent, (intmax_t)sc->sc_readoffset);
411 sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
412 indent, sc->sc_readcons->provider->name);
414 sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
415 sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
416 sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
418 sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
420 sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
421 sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
/*
 * Create a new ggate device from a G_GATE_CMD_CREATE ioctl request:
 * validate the request, allocate and initialize the softc, claim a unit
 * slot, optionally attach a read pass-through provider, then create the
 * geom and provider.  Must be called with the GEOM topology lock held
 * for the g_new_* calls (acquisition not visible in this excerpt).
 * NOTE(review): return statements, error labels and some cleanup lines
 * are missing from this excerpt; the visible flow is commented as-is.
 */
428 g_gate_create(struct g_gate_ctl_create *ggio)
430 struct g_gate_softc *sc;
432 struct g_provider *pp, *ropp;
433 struct g_consumer *cp;
/* --- Parameter validation --- */
437 if (ggio->gctl_mediasize <= 0) {
438 G_GATE_DEBUG(1, "Invalid media size.");
441 if (ggio->gctl_sectorsize <= 0) {
442 G_GATE_DEBUG(1, "Invalid sector size.");
445 if (!powerof2(ggio->gctl_sectorsize)) {
446 G_GATE_DEBUG(1, "Invalid sector size.");
449 if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
450 G_GATE_DEBUG(1, "Invalid media size.");
/* read-only and write-only are mutually exclusive. */
453 if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
454 (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
455 G_GATE_DEBUG(1, "Invalid flags.");
456 }
458 if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
459 ggio->gctl_unit != G_GATE_NAME_GIVEN &&
460 ggio->gctl_unit < 0) {
461 G_GATE_DEBUG(1, "Invalid unit number.");
464 if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
465 ggio->gctl_name[0] == '\0') {
466 G_GATE_DEBUG(1, "No device name.");
/* --- Softc allocation and initialization --- */
470 sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
471 sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
472 strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
474 bioq_init(&sc->sc_inqueue);
475 bioq_init(&sc->sc_outqueue);
476 mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
477 sc->sc_queue_count = 0;
/* Clamp the requested queue depth to the compile-time maximum. */
478 sc->sc_queue_size = ggio->gctl_maxcount;
479 if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
480 sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
481 sc->sc_timeout = ggio->gctl_timeout;
482 callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
/* --- Claim a unit number and pick the device name --- */
484 mtx_lock(&g_gate_units_lock);
485 sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
488 if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
489 snprintf(name, sizeof(name), "%s", ggio->gctl_name);
491 snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
494 /* Check for name collision. */
495 for (unit = 0; unit < g_gate_maxunits; unit++) {
496 if (g_gate_units[unit] == NULL)
498 if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
/* Publish the softc in the unit table. */
504 g_gate_units[sc->sc_unit] = sc;
506 mtx_unlock(&g_gate_units_lock);
/* --- Optional read pass-through provider --- */
510 if (ggio->gctl_readprov[0] == '\0') {
513 ropp = g_provider_by_name(ggio->gctl_readprov);
515 G_GATE_DEBUG(1, "Provider %s doesn't exist.",
516 ggio->gctl_readprov);
/* Read offset must be sector-aligned and fit inside the provider. */
520 if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
521 G_GATE_DEBUG(1, "Invalid read offset.");
525 if (ggio->gctl_mediasize + ggio->gctl_readoffset >
527 G_GATE_DEBUG(1, "Invalid read offset or media size.");
/* --- Geom, consumer and provider creation --- */
533 gp = g_new_geomf(&g_gate_class, "%s", name);
534 gp->start = g_gate_start;
535 gp->access = g_gate_access;
536 gp->orphan = g_gate_orphan;
537 gp->dumpconf = g_gate_dumpconf;
541 cp = g_new_consumer(gp);
542 error = g_attach(cp, ropp);
544 G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
547 error = g_access(cp, 1, 0, 0);
549 G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
553 sc->sc_readcons = cp;
554 sc->sc_readoffset = ggio->gctl_readoffset;
/* Report the allocated unit back to userland. */
557 ggio->gctl_unit = sc->sc_unit;
559 pp = g_new_providerf(gp, "%s", name);
560 pp->mediasize = ggio->gctl_mediasize;
561 pp->sectorsize = ggio->gctl_sectorsize;
562 sc->sc_provider = pp;
563 g_error_provider(pp, 0);
566 mtx_lock(&g_gate_units_lock);
567 sc->sc_name = sc->sc_provider->name;
568 mtx_unlock(&g_gate_units_lock);
569 G_GATE_DEBUG(1, "Device %s created.", gp->name);
/* Arm the request-timeout watchdog if a timeout was requested. */
571 if (sc->sc_timeout > 0) {
572 callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
/* --- Error unwinding (labels missing from this excerpt) --- */
577 g_destroy_consumer(cp);
581 mtx_lock(&g_gate_units_lock);
582 g_gate_units[sc->sc_unit] = NULL;
583 KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
586 mtx_unlock(&g_gate_units_lock);
587 mtx_destroy(&sc->sc_queue_mtx);
/*
 * Apply a G_GATE_CMD_MODIFY request: optionally change the media size,
 * info string, read pass-through provider and/or read offset, as
 * selected by the GG_MODIFY_* bits in ggio->gctl_modify.
 * NOTE(review): return statements and some error paths are missing from
 * this excerpt; the visible flow is commented as-is.
 */
593 g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
595 struct g_provider *pp;
596 struct g_consumer *cp;
/* New media size must be positive and sector-aligned. */
599 if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
600 if (ggio->gctl_mediasize <= 0) {
601 G_GATE_DEBUG(1, "Invalid media size.");
604 pp = sc->sc_provider;
605 if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
606 G_GATE_DEBUG(1, "Invalid media size.");
613 if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
614 (void)strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
/* Replace the read pass-through provider: drop the old consumer first. */
618 if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
620 if (sc->sc_readcons != NULL) {
621 cp = sc->sc_readcons;
622 sc->sc_readcons = NULL;
623 (void)g_access(cp, -1, 0, 0);
625 g_destroy_consumer(cp);
/* An empty name clears the pass-through; otherwise attach the new one. */
627 if (ggio->gctl_readprov[0] != '\0') {
628 pp = g_provider_by_name(ggio->gctl_readprov);
631 G_GATE_DEBUG(1, "Provider %s doesn't exist.",
632 ggio->gctl_readprov);
635 cp = g_new_consumer(sc->sc_provider->geom);
636 error = g_attach(cp, pp);
638 G_GATE_DEBUG(1, "Unable to attach to %s.",
641 error = g_access(cp, 1, 0, 0);
643 G_GATE_DEBUG(1, "Unable to access %s.",
649 g_destroy_consumer(cp);
655 cp = sc->sc_readcons;
/* New read offset: requires a read provider and the same alignment and
 * size checks as at create time. */
658 if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
660 G_GATE_DEBUG(1, "No read provider.");
663 pp = sc->sc_provider;
664 if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
665 G_GATE_DEBUG(1, "Invalid read offset.");
668 if (pp->mediasize + ggio->gctl_readoffset >
669 cp->provider->mediasize) {
670 G_GATE_DEBUG(1, "Invalid read offset or media size.");
673 sc->sc_readoffset = ggio->gctl_readoffset;
/* Commit the new consumer only after all checks have passed. */
676 if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
677 sc->sc_readcons = cp;
/*
 * Reject an ioctl whose control structure was built against a different
 * G_GATE_VERSION than this kernel.
 * NOTE(review): the macro body is truncated in this excerpt (the error
 * return and closing "} while (0)" are not visible).
 */
684 #define G_GATE_CHECK_VERSION(ggio) do { \
685 if ((ggio)->gctl_version != G_GATE_VERSION) { \
686 printf("Version mismatch %d != %d.\n", \
687 ggio->gctl_version, G_GATE_VERSION); \
/*
 * Control-device ioctl handler: the entire userland interface of GEOM
 * Gate.  Dispatches on cmd: CREATE, MODIFY, DESTROY, CANCEL, START
 * (daemon fetches the next request) and DONE (daemon returns a result).
 * NOTE(review): case braces, breaks, some returns and error handling
 * are missing from this excerpt; the visible flow is commented as-is.
 */
692 g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
694 struct g_gate_softc *sc;
698 G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
702 case G_GATE_CMD_CREATE:
704 struct g_gate_ctl_create *ggio = (void *)addr;
706 G_GATE_CHECK_VERSION(ggio);
707 error = g_gate_create(ggio);
709 * Reset TDP_GEOM flag.
710 * There are pending events for sure, because we just created
711 * new provider and other classes want to taste it, but we
712 * cannot answer on I/O requests until we're here.
714 td->td_pflags &= ~TDP_GEOM;
717 case G_GATE_CMD_MODIFY:
719 struct g_gate_ctl_modify *ggio = (void *)addr;
721 G_GATE_CHECK_VERSION(ggio);
722 sc = g_gate_hold(ggio->gctl_unit, NULL);
725 error = g_gate_modify(sc, ggio);
729 case G_GATE_CMD_DESTROY:
731 struct g_gate_ctl_destroy *ggio = (void *)addr;
733 G_GATE_CHECK_VERSION(ggio);
734 sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
/* g_gate_destroy() expects g_gate_units_lock held and drops it. */
738 mtx_lock(&g_gate_units_lock);
739 error = g_gate_destroy(sc, ggio->gctl_force);
745 case G_GATE_CMD_CANCEL:
747 struct g_gate_ctl_cancel *ggio = (void *)addr;
748 struct bio *tbp, *lbp;
750 G_GATE_CHECK_VERSION(ggio);
751 sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
755 mtx_lock(&sc->sc_queue_mtx);
/* gctl_seq == 0 cancels everything; otherwise only the matching bio. */
756 TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
757 if (ggio->gctl_seq == 0 ||
758 ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
759 G_GATE_LOGREQ(1, bp, "Request canceled.");
760 bioq_remove(&sc->sc_outqueue, bp);
762 * Be sure to put requests back onto incoming
763 * queue in the proper order.
766 bioq_insert_head(&sc->sc_inqueue, bp);
768 TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
773 * If only one request was canceled, leave now.
775 if (ggio->gctl_seq != 0)
779 if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
780 ggio->gctl_unit = sc->sc_unit;
781 mtx_unlock(&sc->sc_queue_mtx);
/* Daemon side: block until a request is available, then hand it out. */
785 case G_GATE_CMD_START:
787 struct g_gate_ctl_io *ggio = (void *)addr;
789 G_GATE_CHECK_VERSION(ggio);
790 sc = g_gate_hold(ggio->gctl_unit, NULL);
795 mtx_lock(&sc->sc_queue_mtx);
796 bp = bioq_first(&sc->sc_inqueue);
799 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
800 ggio->gctl_error = ECANCELED;
801 mtx_unlock(&sc->sc_queue_mtx);
/* Interruptible sleep; PDROP releases sc_queue_mtx on wakeup. */
804 if (msleep(sc, &sc->sc_queue_mtx,
805 PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
806 ggio->gctl_error = ECANCELED;
810 ggio->gctl_cmd = bp->bio_cmd;
/* Daemon's buffer is too small for this write/delete: report the
 * required length and let it retry with a bigger buffer. */
811 if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
812 bp->bio_length > ggio->gctl_length) {
813 mtx_unlock(&sc->sc_queue_mtx);
814 ggio->gctl_length = bp->bio_length;
815 ggio->gctl_error = ENOMEM;
/* Move the bio to the outgoing queue while userland works on it. */
818 bioq_remove(&sc->sc_inqueue, bp);
819 bioq_insert_tail(&sc->sc_outqueue, bp);
820 mtx_unlock(&sc->sc_queue_mtx);
822 ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
823 ggio->gctl_offset = bp->bio_offset;
824 ggio->gctl_length = bp->bio_length;
826 switch (bp->bio_cmd) {
/* Copy write payload out to the daemon; on failure, requeue the bio. */
832 error = copyout(bp->bio_data, ggio->gctl_data,
835 mtx_lock(&sc->sc_queue_mtx);
836 bioq_remove(&sc->sc_outqueue, bp);
837 bioq_insert_head(&sc->sc_inqueue, bp);
838 mtx_unlock(&sc->sc_queue_mtx);
/* Daemon side: complete a previously started request. */
847 case G_GATE_CMD_DONE:
849 struct g_gate_ctl_io *ggio = (void *)addr;
851 G_GATE_CHECK_VERSION(ggio);
852 sc = g_gate_hold(ggio->gctl_unit, NULL);
856 mtx_lock(&sc->sc_queue_mtx);
/* Find the bio matching the daemon's sequence number. */
857 TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
858 if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
862 bioq_remove(&sc->sc_outqueue, bp);
863 sc->sc_queue_count--;
865 mtx_unlock(&sc->sc_queue_mtx);
868 * Request was probably canceled.
/* EAGAIN from the daemon means "retry later": requeue the bio. */
872 if (ggio->gctl_error == EAGAIN) {
874 G_GATE_LOGREQ(1, bp, "Request desisted.");
875 mtx_lock(&sc->sc_queue_mtx);
876 sc->sc_queue_count++;
877 bioq_insert_head(&sc->sc_inqueue, bp);
879 mtx_unlock(&sc->sc_queue_mtx);
881 bp->bio_error = ggio->gctl_error;
882 if (bp->bio_error == 0) {
883 bp->bio_completed = bp->bio_length;
884 switch (bp->bio_cmd) {
/* Copy read payload in from the daemon's buffer. */
886 error = copyin(ggio->gctl_data,
887 bp->bio_data, bp->bio_length);
889 bp->bio_error = error;
897 G_GATE_LOGREQ(2, bp, "Request done.");
898 g_io_deliver(bp, bp->bio_error);
/*
 * Create the control device node (mode 0600, root:wheel).
 * NOTE(review): the enclosing function's header and surrounding lines
 * are missing from this excerpt -- presumably part of module/class
 * initialization; confirm against full source.
 */
912 status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
/*
 * Module event handler: on load, set up the unit-table lock and
 * allocate the unit table; on unload, refuse while any device exists,
 * otherwise tear down the lock, the control device and the table.
 * NOTE(review): case labels, returns and the error value for a busy
 * unload are missing from this excerpt.
 */
917 g_gate_modevent(module_t mod, int type, void *data)
923 mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
924 g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
925 M_GATE, M_WAITOK | M_ZERO);
/* Unload path: refuse while any ggate device is still alive. */
930 mtx_lock(&g_gate_units_lock);
931 if (g_gate_nunits > 0) {
932 mtx_unlock(&g_gate_units_lock);
936 mtx_unlock(&g_gate_units_lock);
937 mtx_destroy(&g_gate_units_lock);
939 destroy_dev(status_dev);
940 free(g_gate_units, M_GATE);
/*
 * Module and GEOM-class registration.
 * NOTE(review): the moduledata_t initializer is truncated in this
 * excerpt (name/evhand members not visible).
 */
949 static moduledata_t g_gate_module = {
954 DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
955 DECLARE_GEOM_CLASS(g_gate_class, g_gate);