/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bio.h>
#include <sys/devctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

struct g_dev_softc {
        struct mtx       sc_mtx;
        struct cdev     *sc_dev;
        struct cdev     *sc_alias;
        int              sc_open;
        u_int            sc_active;
        struct selinfo   sc_selinfo;
#define SC_A_DESTROY    (1 << 31)
#define SC_A_OPEN       (1 << 30)
#define SC_A_ACTIVE     (SC_A_OPEN - 1)
};

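/*
 * sc_active packs three things into a single word updated with atomic ops:
 * the low 30 bits (SC_A_ACTIVE) count bios currently in flight, SC_A_OPEN
 * mirrors "sc_open != 0", and SC_A_DESTROY records that devfs has finished
 * destroying the cdev, so the geom can be torn down once the count drains.
 */
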
static d_open_t         g_dev_open;
static d_close_t        g_dev_close;
static d_strategy_t     g_dev_strategy;
static d_ioctl_t        g_dev_ioctl;
static d_kqfilter_t     g_dev_kqfilter;

static void gdev_filter_detach(struct knote *kn);
static int gdev_filter_vnode(struct knote *kn, long hint);

static struct filterops gdev_filterops_vnode = {
        .f_isfd = 1,
        .f_detach = gdev_filter_detach,
        .f_event = gdev_filter_vnode,
};

static struct cdevsw g_dev_cdevsw = {
        .d_version =    D_VERSION,
        .d_open =       g_dev_open,
        .d_close =      g_dev_close,
        .d_read =       physread,
        .d_write =      physwrite,
        .d_ioctl =      g_dev_ioctl,
        .d_strategy =   g_dev_strategy,
        .d_name =       "g_dev",
        .d_flags =      D_DISK | D_TRACKCLOSE,
        .d_kqfilter =   g_dev_kqfilter,
};

static g_init_t g_dev_init;
static g_fini_t g_dev_fini;
static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_attrchanged_t g_dev_attrchanged;
static g_resize_t g_dev_resize;

static struct g_class g_dev_class = {
        .name = "DEV",
        .version = G_VERSION,
        .init = g_dev_init,
        .fini = g_dev_fini,
        .taste = g_dev_taste,
        .orphan = g_dev_orphan,
        .attrchanged = g_dev_attrchanged,
        .resize = g_dev_resize
};

/*
 * We target 262144 (8 x 32768) sectors by default as this significantly
 * increases the throughput on commonly used SSDs with a marginal
 * increase in non-interruptible request latency.
 */
static uint64_t g_dev_del_max_sectors = 262144;

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_DEV stuff");
SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
    &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single "
    "delete request sent to the provider. Larger requests are chunked "
    "so they can be interrupted. (0 = disable chunking)");

static char *dumpdev = NULL;

static void
g_dev_init(struct g_class *mp)
{

        dumpdev = kern_getenv("dumpdev");
}

static void
g_dev_fini(struct g_class *mp)
{

        freeenv(dumpdev);
        dumpdev = NULL;
}

static int
g_dev_setdumpdev(struct cdev *dev, struct diocskerneldump_arg *kda)
{
        struct g_kerneldump kd;
        struct g_consumer *cp;
        int error, len;

        MPASS(dev != NULL && kda != NULL);
        MPASS(kda->kda_index != KDA_REMOVE);

        cp = dev->si_drv2;
        len = sizeof(kd);
        memset(&kd, 0, len);
        kd.offset = 0;
        kd.length = OFF_MAX;
        error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd);
        if (error != 0)
                return (error);

        error = dumper_insert(&kd.di, devtoname(dev), kda);
        if (error == 0)
                dev->si_flags |= SI_DUMPDEV;

        return (error);
}

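/*
 * If the "dumpdev" loader/kenv variable names this device (with or without
 * the /dev/ prefix), arm it as a dump device on first taste and then forget
 * the variable so it is applied only once.
 */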
static int
init_dumpdev(struct cdev *dev)
{
        struct diocskerneldump_arg kda;
        struct g_consumer *cp;
        const char *devprefix = _PATH_DEV, *devname;
        int error;
        size_t len;

        bzero(&kda, sizeof(kda));
        kda.kda_index = KDA_APPEND;

        if (dumpdev == NULL)
                return (0);

        len = strlen(devprefix);
        devname = devtoname(dev);
        if (strcmp(devname, dumpdev) != 0 &&
            (strncmp(dumpdev, devprefix, len) != 0 ||
            strcmp(devname, dumpdev + len) != 0))
                return (0);

        cp = (struct g_consumer *)dev->si_drv2;
        error = g_access(cp, 1, 0, 0);
        if (error != 0)
                return (error);

        error = g_dev_setdumpdev(dev, &kda);
        if (error == 0) {
                freeenv(dumpdev);
                dumpdev = NULL;
        }

        (void)g_access(cp, -1, 0, 0);
        return (error);
}

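/*
 * Final teardown, run from the GEOM event thread once the last outstanding
 * bio has drained: drop any remaining access counts, detach and destroy the
 * consumer and geom, and free the softc.
 */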
static void
g_dev_destroy(void *arg, int flags __unused)
{
        struct g_consumer *cp;
        struct g_geom *gp;
        struct g_dev_softc *sc;
        char buf[SPECNAMELEN + 6];

        g_topology_assert();
        cp = arg;
        gp = cp->geom;
        sc = cp->private;
        g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name);
        snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
        devctl_notify("GEOM", "DEV", "DESTROY", buf);
        knlist_clear(&sc->sc_selinfo.si_note, 0);
        knlist_destroy(&sc->sc_selinfo.si_note);
        if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
                g_access(cp, -cp->acr, -cp->acw, -cp->ace);
        g_detach(cp);
        g_destroy_consumer(cp);
        g_destroy_geom(gp);
        mtx_destroy(&sc->sc_mtx);
        g_free(sc);
}

void
g_dev_print(void)
{
        struct g_geom *gp;
        char const *p = "";

        LIST_FOREACH(gp, &g_dev_class.geom, geom) {
                printf("%s%s", p, gp->name);
                p = " ";
        }
        printf("\n");
}

static void
g_dev_set_physpath(struct g_consumer *cp)
{
        struct g_dev_softc *sc;
        char *physpath;
        int error, physpath_len;

        if (g_access(cp, 1, 0, 0) != 0)
                return;

        sc = cp->private;
        physpath_len = MAXPATHLEN;
        physpath = g_malloc(physpath_len, M_WAITOK | M_ZERO);
        error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
        g_access(cp, -1, 0, 0);
        if (error == 0 && strlen(physpath) != 0) {
                struct cdev *dev, *old_alias_dev;
                struct cdev **alias_devp;

                dev = sc->sc_dev;
                old_alias_dev = sc->sc_alias;
                alias_devp = (struct cdev **)&sc->sc_alias;
                make_dev_physpath_alias(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME,
                    alias_devp, dev, old_alias_dev, physpath);
        } else if (sc->sc_alias) {
                destroy_dev((struct cdev *)sc->sc_alias);
                sc->sc_alias = NULL;
        }
        g_free(physpath);
}

static void
g_dev_set_media(struct g_consumer *cp)
{
        struct g_dev_softc *sc;
        struct cdev *dev;
        char buf[SPECNAMELEN + 6];

        sc = cp->private;
        dev = sc->sc_dev;
        snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
        devctl_notify("DEVFS", "CDEV", "MEDIACHANGE", buf);
        devctl_notify("GEOM", "DEV", "MEDIACHANGE", buf);
        dev = sc->sc_alias;
        if (dev != NULL) {
                snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
                devctl_notify("DEVFS", "CDEV", "MEDIACHANGE", buf);
                devctl_notify("GEOM", "DEV", "MEDIACHANGE", buf);
        }
}

static void
g_dev_attrchanged(struct g_consumer *cp, const char *attr)
{

        if (strcmp(attr, "GEOM::media") == 0) {
                g_dev_set_media(cp);
                return;
        }

        if (strcmp(attr, "GEOM::physpath") == 0) {
                g_dev_set_physpath(cp);
                return;
        }
}

static void
g_dev_resize(struct g_consumer *cp)
{
        struct g_dev_softc *sc;
        char buf[SPECNAMELEN + 6];

        sc = cp->private;
        KNOTE_UNLOCKED(&sc->sc_selinfo.si_note, NOTE_ATTRIB);

        snprintf(buf, sizeof(buf), "cdev=%s", cp->provider->name);
        devctl_notify("GEOM", "DEV", "SIZECHANGE", buf);
}

struct g_provider *
g_dev_getprovider(struct cdev *dev)
{
        struct g_consumer *cp;

        g_topology_assert();
        if (dev == NULL)
                return (NULL);
        if (dev->si_devsw != &g_dev_cdevsw)
                return (NULL);
        cp = dev->si_drv2;
        return (cp->provider);
}

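/*
 * Taste callback: for every provider that appears, hang a DEV geom and
 * consumer off it and create the matching /dev node (plus any aliases the
 * provider advertises), so the disk is reachable from userland.
 */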
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
        struct g_geom *gp;
        struct g_geom_alias *gap;
        struct g_consumer *cp;
        struct g_dev_softc *sc;
        int error;
        struct cdev *dev, *adev;
        char buf[SPECNAMELEN + 6];
        struct make_dev_args args;

        g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
        g_topology_assert();
        gp = g_new_geomf(mp, "%s", pp->name);
        sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
        mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF);
        cp = g_new_consumer(gp);
        cp->private = sc;
        cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
        error = g_attach(cp, pp);
        if (error != 0) {
                printf("%s: g_dev_taste(%s) failed to g_attach, error=%d\n",
                    __func__, pp->name, error);
                g_destroy_consumer(cp);
                g_destroy_geom(gp);
                mtx_destroy(&sc->sc_mtx);
                g_free(sc);
                return (NULL);
        }
        make_dev_args_init(&args);
        args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
        args.mda_devsw = &g_dev_cdevsw;
        args.mda_cr = NULL;
        args.mda_uid = UID_ROOT;
        args.mda_gid = GID_OPERATOR;
        args.mda_mode = 0640;
        args.mda_si_drv1 = sc;
        args.mda_si_drv2 = cp;
        error = make_dev_s(&args, &sc->sc_dev, "%s", gp->name);
        if (error != 0) {
                printf("%s: make_dev_s() failed (gp->name=%s, error=%d)\n",
                    __func__, gp->name, error);
                g_detach(cp);
                g_destroy_consumer(cp);
                g_destroy_geom(gp);
                mtx_destroy(&sc->sc_mtx);
                g_free(sc);
                return (NULL);
        }
        dev = sc->sc_dev;
        dev->si_flags |= SI_UNMAPPED;
        dev->si_iosize_max = maxphys;
        knlist_init_mtx(&sc->sc_selinfo.si_note, &sc->sc_mtx);
        error = init_dumpdev(dev);
        if (error != 0)
                printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n",
                    __func__, gp->name, error);

        g_dev_attrchanged(cp, "GEOM::physpath");
        snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
        devctl_notify("GEOM", "DEV", "CREATE", buf);
        /*
         * Now add all the aliases for this drive.
         */
        LIST_FOREACH(gap, &pp->aliases, ga_next) {
                error = make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
                    &adev, dev, "%s", gap->ga_alias);
                if (error != 0) {
                        printf("%s: make_dev_alias_p() failed (name=%s, error=%d)\n",
                            __func__, gap->ga_alias, error);
                        continue;
                }
                snprintf(buf, sizeof(buf), "cdev=%s", gap->ga_alias);
                devctl_notify("GEOM", "DEV", "CREATE", buf);
        }

        return (gp);
}

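/*
 * d_open: translate the FREAD/FWRITE open flags into GEOM read/write/excl
 * access deltas and apply them with g_access(), bumping sc_open and the
 * SC_A_OPEN bit so close-side draining and kqueue NOTE_OPEN work.
 */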
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        struct g_consumer *cp;
        struct g_dev_softc *sc;
        int error, r, w, e;

        cp = dev->si_drv2;
        g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
            cp->geom->name, flags, fmt, td);

        r = flags & FREAD ? 1 : 0;
        w = flags & FWRITE ? 1 : 0;
#ifdef notyet
        e = flags & O_EXCL ? 1 : 0;
#else
        e = 0;
#endif

        /*
         * This happens on attempt to open a device node with O_EXEC.
         */
        if (r + w + e == 0)
                return (EINVAL);

        if (w) {
                /*
                 * When running in very secure mode, do not allow
                 * opens for writing of any disks.
                 */
                error = securelevel_ge(td->td_ucred, 2);
                if (error)
                        return (error);
        }
        g_topology_lock();
        error = g_access(cp, r, w, e);
        g_topology_unlock();
        if (error == 0) {
                sc = dev->si_drv1;
                mtx_lock(&sc->sc_mtx);
                if (sc->sc_open == 0 && (sc->sc_active & SC_A_ACTIVE) != 0)
                        wakeup(&sc->sc_active);
                sc->sc_open += r + w + e;
                if (sc->sc_open == 0)
                        atomic_clear_int(&sc->sc_active, SC_A_OPEN);
                else
                        atomic_set_int(&sc->sc_active, SC_A_OPEN);
                KNOTE_LOCKED(&sc->sc_selinfo.si_note, NOTE_OPEN);
                mtx_unlock(&sc->sc_mtx);
        }
        return (error);
}

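/*
 * d_close: the mirror image of g_dev_open().  On last close, wait in the
 * msleep loop below for in-flight bios to drain before the access counts
 * are dropped; D_TRACKCLOSE guarantees every open gets a matching close.
 */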
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        struct g_consumer *cp;
        struct g_dev_softc *sc;
        int error, r, w, e;

        cp = dev->si_drv2;
        g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
            cp->geom->name, flags, fmt, td);

        r = flags & FREAD ? -1 : 0;
        w = flags & FWRITE ? -1 : 0;
#ifdef notyet
        e = flags & O_EXCL ? -1 : 0;
#else
        e = 0;
#endif

        /*
         * vgonel(9) - caused by e.g. a forced unmount of devfs - calls
         * VOP_CLOSE(9) on the devfs vnode without any FREAD or FWRITE flags,
         * which would result in zero deltas, which in turn would cause
         * a panic in g_access(9).
         *
         * Note that we cannot zero the counters (i.e. do "r = cp->acr"
         * etc) instead, because the consumer might be opened in another
         * devfs instance.
         */
        if (r + w + e == 0)
                return (EINVAL);

        sc = dev->si_drv1;
        mtx_lock(&sc->sc_mtx);
        sc->sc_open += r + w + e;
        if (sc->sc_open == 0)
                atomic_clear_int(&sc->sc_active, SC_A_OPEN);
        else
                atomic_set_int(&sc->sc_active, SC_A_OPEN);
        while (sc->sc_open == 0 && (sc->sc_active & SC_A_ACTIVE) != 0)
                msleep(&sc->sc_active, &sc->sc_mtx, 0, "g_dev_close", hz / 10);
        KNOTE_LOCKED(&sc->sc_selinfo.si_note,
            NOTE_CLOSE | (w ? NOTE_CLOSE_WRITE : 0));
        mtx_unlock(&sc->sc_mtx);
        g_topology_lock();
        error = g_access(cp, r, w, e);
        g_topology_unlock();
        return (error);
}

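/*
 * d_ioctl: implement the generic disk ioctls (sector size, media size,
 * kernel dump set-up, BIO_DELETE via DIOCGDELETE, zone commands, ...) on
 * top of GEOM attributes and requests, falling back to the provider's own
 * geom ioctl method for anything not handled here.
 */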
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
        struct g_consumer *cp;
        struct g_provider *pp;
        off_t offset, length, chunk, odd;
        int i, error;

        cp = dev->si_drv2;
        pp = cp->provider;

        /* If consumer or provider is dying, don't disturb. */
        if (cp->flags & G_CF_ORPHAN)
                return (ENXIO);
        if (pp->error)
                return (pp->error);

        error = 0;
        KASSERT(cp->acr || cp->acw,
            ("Consumer with zero access count in g_dev_ioctl"));

        i = IOCPARM_LEN(cmd);
        switch (cmd) {
        case DIOCGSECTORSIZE:
                *(u_int *)data = pp->sectorsize;
                if (*(u_int *)data == 0)
                        error = ENOENT;
                break;
        case DIOCGMEDIASIZE:
                *(off_t *)data = pp->mediasize;
                if (*(off_t *)data == 0)
                        error = ENOENT;
                break;
        case DIOCGFWSECTORS:
                error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
                if (error == 0 && *(u_int *)data == 0)
                        error = ENOENT;
                break;
        case DIOCGFWHEADS:
                error = g_io_getattr("GEOM::fwheads", cp, &i, data);
                if (error == 0 && *(u_int *)data == 0)
                        error = ENOENT;
                break;
        case DIOCSKERNELDUMP:
            {
                struct diocskerneldump_arg *kda;
                uint8_t *encryptedkey;

                kda = (struct diocskerneldump_arg *)data;
                if (kda->kda_index == KDA_REMOVE_ALL ||
                    kda->kda_index == KDA_REMOVE_DEV ||
                    kda->kda_index == KDA_REMOVE) {
                        error = dumper_remove(devtoname(dev), kda);
                        explicit_bzero(kda, sizeof(*kda));
                        break;
                }

                if (kda->kda_encryption != KERNELDUMP_ENC_NONE) {
                        if (kda->kda_encryptedkeysize == 0 ||
                            kda->kda_encryptedkeysize >
                            KERNELDUMP_ENCKEY_MAX_SIZE) {
                                explicit_bzero(kda, sizeof(*kda));
                                return (EINVAL);
                        }
                        encryptedkey = malloc(kda->kda_encryptedkeysize, M_TEMP,
                            M_WAITOK);
                        error = copyin(kda->kda_encryptedkey, encryptedkey,
                            kda->kda_encryptedkeysize);
                } else
                        encryptedkey = NULL;
                if (error == 0) {
                        kda->kda_encryptedkey = encryptedkey;
                        error = g_dev_setdumpdev(dev, kda);
                }
                zfree(encryptedkey, M_TEMP);
                explicit_bzero(kda, sizeof(*kda));
                break;
            }
        case DIOCGFLUSH:
                error = g_io_flush(cp);
                break;
        case DIOCGDELETE:
                offset = ((off_t *)data)[0];
                length = ((off_t *)data)[1];
                if ((offset % pp->sectorsize) != 0 ||
                    (length % pp->sectorsize) != 0 || length <= 0) {
                        printf("%s: offset=%jd length=%jd\n", __func__, offset,
                            length);
                        error = EINVAL;
                        break;
                }
                while (length > 0) {
                        chunk = length;
                        if (g_dev_del_max_sectors != 0 &&
                            chunk > g_dev_del_max_sectors * pp->sectorsize) {
                                chunk = g_dev_del_max_sectors * pp->sectorsize;
                                if (pp->stripesize > 0) {
                                        odd = (offset + chunk +
                                            pp->stripeoffset) % pp->stripesize;
                                        if (chunk > odd)
                                                chunk -= odd;
                                }
                        }
                        error = g_delete_data(cp, offset, chunk);
                        length -= chunk;
                        offset += chunk;
                        if (error)
                                break;
                        /*
                         * Since the request size can be large, the service
                         * time can be long as well.  We make this ioctl
                         * interruptible by checking for signals for each bio.
                         */
                        if (SIGPENDING(td))
                                break;
                }
                break;
        case DIOCGIDENT:
                error = g_io_getattr("GEOM::ident", cp, &i, data);
                break;
        case DIOCGPROVIDERNAME:
                strlcpy(data, pp->name, i);
                break;
        case DIOCGSTRIPESIZE:
                *(off_t *)data = pp->stripesize;
                break;
        case DIOCGSTRIPEOFFSET:
                *(off_t *)data = pp->stripeoffset;
                break;
        case DIOCGPHYSPATH:
                error = g_io_getattr("GEOM::physpath", cp, &i, data);
                if (error == 0 && *(char *)data == '\0')
                        error = ENOENT;
                break;
        case DIOCGATTR: {
                struct diocgattr_arg *arg = (struct diocgattr_arg *)data;

                if (arg->len > sizeof(arg->value)) {
                        error = EINVAL;
                        break;
                }
                error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
                break;
        }
        case DIOCZONECMD: {
                struct disk_zone_args *zone_args = (struct disk_zone_args *)data;
                struct disk_zone_rep_entry *new_entries, *old_entries;
                struct disk_zone_report *rep;
                size_t alloc_size;

                old_entries = NULL;
                new_entries = NULL;
                rep = NULL;
                alloc_size = 0;

                if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) {
                        rep = &zone_args->zone_params.report;
#define MAXENTRIES      (maxphys / sizeof(struct disk_zone_rep_entry))
                        if (rep->entries_allocated > MAXENTRIES)
                                rep->entries_allocated = MAXENTRIES;
                        alloc_size = rep->entries_allocated *
                            sizeof(struct disk_zone_rep_entry);
                        if (alloc_size != 0)
                                new_entries = g_malloc(alloc_size,
                                    M_WAITOK | M_ZERO);
                        old_entries = rep->entries;
                        rep->entries = new_entries;
                }
                error = g_io_zonecmd(zone_args, cp);
                if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES &&
                    alloc_size != 0 && error == 0)
                        error = copyout(new_entries, old_entries, alloc_size);
                if (old_entries != NULL && rep != NULL)
                        rep->entries = old_entries;
                if (new_entries != NULL)
                        g_free(new_entries);
                break;
        }
        default:
                if (pp->geom->ioctl != NULL) {
                        error = pp->geom->ioctl(pp, cmd, data, fflag, td);
                } else {
                        error = ENOIOCTL;
                }
        }

        return (error);
}

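/*
 * A minimal userland sketch of the DIOCGDELETE path above (hypothetical
 * fd, offset and length; error handling omitted):
 *
 *      off_t args[2] = { offset, length };     // both sector-aligned
 *      ioctl(fd, DIOCGDELETE, args);
 */

/*
 * Completion callback for the cloned bio: propagate status to the original
 * devfs bio, post kqueue notes, and drop the sc_active reference, possibly
 * scheduling the deferred geom destruction.
 */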
static void
g_dev_done(struct bio *bp2)
{
        struct g_consumer *cp;
        struct g_dev_softc *sc;
        struct bio *bp;
        int active;

        cp = bp2->bio_from;
        sc = cp->private;
        bp = bp2->bio_parent;
        bp->bio_error = bp2->bio_error;
        bp->bio_completed = bp2->bio_completed;
        bp->bio_resid = bp->bio_length - bp2->bio_completed;
        if (bp2->bio_cmd == BIO_ZONE)
                bcopy(&bp2->bio_zone, &bp->bio_zone, sizeof(bp->bio_zone));

        if (bp2->bio_error != 0) {
                g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
                    bp2, bp2->bio_error);
                bp->bio_flags |= BIO_ERROR;
        } else {
                if (bp->bio_cmd == BIO_READ)
                        KNOTE_UNLOCKED(&sc->sc_selinfo.si_note, NOTE_READ);
                if (bp->bio_cmd == BIO_WRITE)
                        KNOTE_UNLOCKED(&sc->sc_selinfo.si_note, NOTE_WRITE);
                g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
                    bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed);
        }

        g_destroy_bio(bp2);
        active = atomic_fetchadd_int(&sc->sc_active, -1) - 1;
        if ((active & SC_A_ACTIVE) == 0) {
                if ((active & SC_A_OPEN) == 0)
                        wakeup(&sc->sc_active);
                if (active & SC_A_DESTROY)
                        g_post_event(g_dev_destroy, cp, M_NOWAIT, NULL);
        }
        biodone(bp);
}

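/*
 * d_strategy: clone the incoming devfs bio, point its completion at
 * g_dev_done(), and hand the clone to the attached GEOM consumer.  The
 * sector-alignment check rejects malformed requests up front, and the
 * retry loop below only spins when g_clone_bio() hits a transient ENOMEM.
 */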
static void
g_dev_strategy(struct bio *bp)
{
        struct g_consumer *cp;
        struct bio *bp2;
        struct cdev *dev;
        struct g_dev_softc *sc;

        KASSERT(bp->bio_cmd == BIO_READ ||
            bp->bio_cmd == BIO_WRITE ||
            bp->bio_cmd == BIO_DELETE ||
            bp->bio_cmd == BIO_FLUSH ||
            bp->bio_cmd == BIO_ZONE,
            ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
        dev = bp->bio_dev;
        cp = dev->si_drv2;
        KASSERT(cp->acr || cp->acw,
            ("Consumer with zero access count in g_dev_strategy"));
        biotrack(bp, __func__);

        if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
            (bp->bio_bcount % cp->provider->sectorsize) != 0) {
                bp->bio_resid = bp->bio_bcount;
                biofinish(bp, NULL, EINVAL);
                return;
        }

        sc = dev->si_drv1;
        KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy"));
        atomic_add_int(&sc->sc_active, 1);

        for (;;) {
                /*
                 * XXX: This is not an ideal solution, but I believe it to
                 * XXX: deadlock safely, all things considered.
                 */
                bp2 = g_clone_bio(bp);
                if (bp2 != NULL)
                        break;
                pause("gdstrat", hz / 10);
        }
        KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
        bp2->bio_done = g_dev_done;
        g_trace(G_T_BIO,
            "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
            bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
            bp2->bio_data, bp2->bio_cmd);
        g_io_request(bp2, cp);
        KASSERT(cp->acr || cp->acw,
            ("g_dev_strategy raced with g_dev_close and lost"));
}

/*
 * g_dev_callback()
 *
 * Called by devfs when asynchronous device destruction is completed.
 * - Mark that we have no attached device any more.
 * - If there are no outstanding requests, schedule geom destruction.
 *   Otherwise destruction will be scheduled later by g_dev_done().
 */
static void
g_dev_callback(void *arg)
{
        struct g_consumer *cp;
        struct g_dev_softc *sc;
        int active;

        cp = arg;
        sc = cp->private;
        g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name);

        sc->sc_dev = NULL;
        sc->sc_alias = NULL;
        active = atomic_fetchadd_int(&sc->sc_active, SC_A_DESTROY);
        if ((active & SC_A_ACTIVE) == 0)
                g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
}

/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Request asynchronous device destruction to prevent any more requests
 *   from coming in.  The provider is already marked with an error, so
 *   anything which comes in the interim will be returned immediately.
 */
static void
g_dev_orphan(struct g_consumer *cp)
{
        struct cdev *dev;
        struct g_dev_softc *sc;

        g_topology_assert();
        sc = cp->private;
        dev = sc->sc_dev;
        g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name);

        /* Reset any dump-area set on this device. */
        if (dev->si_flags & SI_DUMPDEV) {
                struct diocskerneldump_arg kda;

                bzero(&kda, sizeof(kda));
                kda.kda_index = KDA_REMOVE_DEV;
                (void)dumper_remove(devtoname(dev), &kda);
        }

        /* Destroy the struct cdev so we get no more requests. */
        delist_dev(dev);
        destroy_dev_sched_cb(dev, g_dev_callback, cp);
}

static void
gdev_filter_detach(struct knote *kn)
{
        struct g_dev_softc *sc;

        sc = kn->kn_hook;

        knlist_remove(&sc->sc_selinfo.si_note, kn, 0);
}

static int
gdev_filter_vnode(struct knote *kn, long hint)
{
        kn->kn_fflags |= kn->kn_sfflags & hint;

        return (kn->kn_fflags != 0);
}

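/*
 * d_kqfilter: allow EVFILT_VNODE kevents on the device node so userland can
 * watch for opens, closes, reads, writes and resizes (NOTE_ATTRIB) without
 * polling; the knote hangs off the softc's selinfo note list.
 */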
static int
g_dev_kqfilter(struct cdev *dev, struct knote *kn)
{
        struct g_dev_softc *sc;

        sc = dev->si_drv1;

        if (kn->kn_filter != EVFILT_VNODE)
                return (EINVAL);

#define SUPPORTED_EVENTS        (NOTE_ATTRIB | NOTE_OPEN | NOTE_CLOSE | \
    NOTE_CLOSE_WRITE | NOTE_READ | NOTE_WRITE)
        if (kn->kn_sfflags & ~SUPPORTED_EVENTS)
                return (EOPNOTSUPP);

        kn->kn_fop = &gdev_filterops_vnode;
        kn->kn_hook = sc;
        knlist_add(&sc->sc_selinfo.si_note, kn, 0);

        return (0);
}

DECLARE_GEOM_CLASS(g_dev_class, g_dev);