2 * Copyright (c) 2009-2010 Fabio Checconi
3 * Copyright (c) 2009-2010 Luigi Rizzo, Universita` di Pisa
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Main control module for geom-based disk schedulers ('sched').
35 * A 'sched' node is typically inserted transparently between
36 * an existing provider pp and its original geom gp
40 * using the command "geom sched insert <provider>" and
41 * resulting in the following topology
43 * [pp --> sched_gp --> cp] [new_pp --> gp ... ]
45 * Deletion "geom sched destroy <provider>.sched." restores the
46 * original chain. The normal "geom sched create <provider>"
50 * Internally, the 'sched' uses the following data structures
52 * geom{} g_sched_softc{} g_gsched{}
53 * +----------+ +---------------+ +-------------+
54 * | softc *-|--->| sc_gsched *-|-->| gs_init |
55 * | ... | | | | gs_fini |
56 * | | | [ hash table] | | gs_start |
57 * +----------+ | | | ... |
63 * +---------------+ | algorithm- |
67 * A g_sched_softc{} is created with a "geom sched insert" call.
68 * In turn this instantiates a specific scheduling algorithm,
69 * which sets sc_gsched to point to the algorithm callbacks,
70 * and calls gs_init() to create the g_*_softc{} .
71 * The other callbacks (gs_start, gs_next, ...) are invoked
74 * g_sched_softc{} is defined in g_sched.h and mostly used here;
75 * g_gsched{}, and the gs_callbacks, are documented in gs_scheduler.h;
76 * g_*_softc{} is defined/implemented by each algorithm (gs_*.c)
79 * When a bio is received on the provider, it goes to the
80 * g_sched_start() which calls gs_start() to initially queue it;
81 * then we call g_sched_dispatch() that loops around gs_next()
82 * to select zero or more bio's to be sent downstream.
84 * g_sched_dispatch() can also be called as a result of a timeout,
85 * e.g. when doing anticipation or pacing requests.
87 * When a bio comes back, it goes to g_sched_done() which in turn
88 * calls gs_done(). The latter does any necessary housekeeping in
89 * the scheduling algorithm, and may decide to call g_sched_dispatch()
90 * to send more bio's downstream.
92 * If an algorithm needs per-flow queues, these are created
93 * calling gs_init_class() and destroyed with gs_fini_class(),
94 * and they are also inserted in the hash table implemented in
97 * If an algorithm is replaced, or a transparently-inserted node is
98 * removed with "geom sched destroy", we need to remove all references
99 * to the g_*_softc{} and g_sched_softc from the bio's still in
100 * the scheduler. g_sched_forced_dispatch() helps doing this.
101 * XXX need to explain better.
104 #include <sys/cdefs.h>
105 #include <sys/param.h>
106 #include <sys/systm.h>
107 #include <sys/kernel.h>
108 #include <sys/module.h>
109 #include <sys/lock.h>
110 #include <sys/mutex.h>
112 #include <sys/limits.h>
113 #include <sys/hash.h>
114 #include <sys/sysctl.h>
115 #include <sys/malloc.h>
116 #include <sys/proc.h> /* we access curthread */
117 #include <geom/geom.h>
118 #include "gs_scheduler.h"
119 #include "g_sched.h" /* geom hooks */
122 * Size of the per-geom hash table storing traffic classes.
123 * We may decide to change it at a later time, it has no ABI
124 * implications as it is only used for run-time allocations.
126 #define G_SCHED_HASH_SIZE 32
128 static int g_sched_destroy(struct g_geom *gp, boolean_t force);
129 static int g_sched_destroy_geom(struct gctl_req *req,
130 struct g_class *mp, struct g_geom *gp);
131 static void g_sched_config(struct gctl_req *req, struct g_class *mp,
133 static struct g_geom *g_sched_taste(struct g_class *mp,
134 struct g_provider *pp, int flags __unused);
135 static void g_sched_dumpconf(struct sbuf *sb, const char *indent,
136 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
137 static void g_sched_init(struct g_class *mp);
138 static void g_sched_fini(struct g_class *mp);
139 static int g_sched_ioctl(struct g_provider *pp, u_long cmd, void *data,
140 int fflag, struct thread *td);
142 struct g_class g_sched_class = {
143 .name = G_SCHED_CLASS_NAME,
144 .version = G_VERSION,
145 .ctlreq = g_sched_config,
146 .taste = g_sched_taste,
147 .destroy_geom = g_sched_destroy_geom,
148 .init = g_sched_init,
149 .ioctl = g_sched_ioctl,
153 MALLOC_DEFINE(M_GEOM_SCHED, "GEOM_SCHED", "Geom schedulers data structures");
156 * Global variables describing the state of the geom_sched module.
157 * There is only one static instance of this structure.
159 LIST_HEAD(gs_list, g_gsched); /* type, link field */
160 struct geom_sched_vars {
162 struct gs_list gs_scheds; /* list of algorithms */
164 u_int gs_sched_count; /* how many algorithms ? */
165 u_int gs_patched; /* g_io_request was patched */
167 u_int gs_initialized;
168 u_int gs_expire_secs; /* expiration of hash entries */
170 struct bio_queue_head gs_pending;
173 /* The following are for stats, usually protected by gs_mtx. */
174 u_long gs_requests; /* total requests */
175 u_long gs_done; /* total done */
176 u_int gs_in_flight; /* requests in flight */
177 u_int gs_writes_in_flight;
178 u_int gs_bytes_in_flight;
179 u_int gs_write_bytes_in_flight;
181 char gs_names[256]; /* names of schedulers */
184 static struct geom_sched_vars me = {
185 .gs_expire_secs = 10,
188 SYSCTL_DECL(_kern_geom);
189 SYSCTL_NODE(_kern_geom, OID_AUTO, sched, CTLFLAG_RW, 0,
192 SYSCTL_INT(_kern_geom_sched, OID_AUTO, in_flight_wb, CTLFLAG_RD,
193 &me.gs_write_bytes_in_flight, 0, "Write bytes in flight");
195 SYSCTL_INT(_kern_geom_sched, OID_AUTO, in_flight_b, CTLFLAG_RD,
196 &me.gs_bytes_in_flight, 0, "Bytes in flight");
198 SYSCTL_UINT(_kern_geom_sched, OID_AUTO, in_flight_w, CTLFLAG_RD,
199 &me.gs_writes_in_flight, 0, "Write Requests in flight");
201 SYSCTL_UINT(_kern_geom_sched, OID_AUTO, in_flight, CTLFLAG_RD,
202 &me.gs_in_flight, 0, "Requests in flight");
204 SYSCTL_ULONG(_kern_geom_sched, OID_AUTO, done, CTLFLAG_RD,
205 &me.gs_done, 0, "Total done");
207 SYSCTL_ULONG(_kern_geom_sched, OID_AUTO, requests, CTLFLAG_RD,
208 &me.gs_requests, 0, "Total requests");
210 SYSCTL_STRING(_kern_geom_sched, OID_AUTO, algorithms, CTLFLAG_RD,
211 &me.gs_names, 0, "Algorithm names");
213 SYSCTL_UINT(_kern_geom_sched, OID_AUTO, alg_count, CTLFLAG_RD,
214 &me.gs_sched_count, 0, "Number of algorithms");
216 SYSCTL_UINT(_kern_geom_sched, OID_AUTO, debug, CTLFLAG_RW,
217 &me.gs_debug, 0, "Debug level");
219 SYSCTL_UINT(_kern_geom_sched, OID_AUTO, expire_secs, CTLFLAG_RW,
220 &me.gs_expire_secs, 0, "Expire time in seconds");
223 * g_sched calls the scheduler algorithms with this lock held.
224 * The locking functions are exposed so the scheduler algorithms can also
225 * protect themselves e.g. when running a callout handler.
228 g_sched_lock(struct g_geom *gp)
230 struct g_sched_softc *sc = gp->softc;
232 mtx_lock(&sc->sc_mtx);
236 g_sched_unlock(struct g_geom *gp)
238 struct g_sched_softc *sc = gp->softc;
240 mtx_unlock(&sc->sc_mtx);
244 * Support functions to handle references to the module,
245 * which are coming from devices using this scheduler.
248 g_gsched_ref(struct g_gsched *gsp)
251 atomic_add_int(&gsp->gs_refs, 1);
255 g_gsched_unref(struct g_gsched *gsp)
258 atomic_add_int(&gsp->gs_refs, -1);
262 * Update the stats when this request is done.
265 g_sched_update_stats(struct bio *bio)
270 me.gs_bytes_in_flight -= bio->bio_length;
271 if (bio->bio_cmd & BIO_WRITE) {
272 me.gs_writes_in_flight--;
273 me.gs_write_bytes_in_flight -= bio->bio_length;
278 * Dispatch any pending request.
281 g_sched_forced_dispatch(struct g_geom *gp)
283 struct g_sched_softc *sc = gp->softc;
284 struct g_gsched *gsp = sc->sc_gsched;
287 KASSERT(mtx_owned(&sc->sc_mtx),
288 ("sc_mtx not owned during forced dispatch"));
290 while ((bp = gsp->gs_next(sc->sc_data, 1)) != NULL)
291 g_io_request(bp, LIST_FIRST(&gp->consumer));
295 * The main dispatch loop, called either here after the start
296 * routine, or by scheduling algorithms when they receive a timeout
297 * or a 'done' notification. Does not share code with the forced
298 * dispatch path, since the gs_done() callback can call us.
301 g_sched_dispatch(struct g_geom *gp)
303 struct g_sched_softc *sc = gp->softc;
304 struct g_gsched *gsp = sc->sc_gsched;
307 KASSERT(mtx_owned(&sc->sc_mtx), ("sc_mtx not owned during dispatch"));
309 if ((sc->sc_flags & G_SCHED_FLUSHING))
312 while ((bp = gsp->gs_next(sc->sc_data, 0)) != NULL)
313 g_io_request(bp, LIST_FIRST(&gp->consumer));
317 * Recent (8.0 and above) versions of FreeBSD have support to
318 * register classifiers of disk requests. The classifier is
319 * invoked by g_io_request(), and stores the information into
320 * bp->bio_classifier1.
322 * Support for older versions, which is left here only for
323 * documentation purposes, relies on two hacks:
324 * 1. classification info is written into the bio_caller1
325 * field of the topmost node in the bio chain. This field
326 * is rarely used, but this module is incompatible with
327 * those that use bio_caller1 for other purposes,
328 * such as ZFS and gjournal;
329 * 2. g_io_request() is patched in-memory when the module is
330 * loaded, so that the function calls a classifier as its
331 * first thing. g_io_request() is restored when the module
332 * is unloaded. This functionality is only supported for
333 * x86 and amd64, other architectures need source code changes.
337 * Lookup the identity of the issuer of the original request.
338 * In the current implementation we use the curthread of the
339 * issuer, but different mechanisms may be implemented later
340 * so we do not make assumptions on the return value which for
341 * us is just an opaque identifier.
345 g_sched_classify(struct bio *bp)
348 #if __FreeBSD_version > 800098
349 /* we have classifier fields in the struct bio */
350 #define HAVE_BIO_CLASSIFIER
351 return ((u_long)bp->bio_classifier1);
353 #warning old version!!!
354 while (bp->bio_parent != NULL)
357 return ((u_long)bp->bio_caller1);
361 /* Return the hash chain for the given key. */
362 static inline struct g_hash *
363 g_sched_hash(struct g_sched_softc *sc, u_long key)
366 return (&sc->sc_hash[key & sc->sc_mask]);
370 * Helper function for the children classes, which takes
371 * a geom and a bio and returns the private descriptor
372 * associated to the request. This involves fetching
373 * the classification field and [al]locating the
374 * corresponding entry in the hash table.
377 g_sched_get_class(struct g_geom *gp, struct bio *bp)
379 struct g_sched_softc *sc;
380 struct g_sched_class *gsc;
381 struct g_gsched *gsp;
382 struct g_hash *bucket;
386 key = g_sched_classify(bp);
387 bucket = g_sched_hash(sc, key);
388 LIST_FOREACH(gsc, bucket, gsc_clist) {
389 if (key == gsc->gsc_key) {
391 return (gsc->gsc_priv);
396 gsc = malloc(sizeof(*gsc) + gsp->gs_priv_size,
397 M_GEOM_SCHED, M_NOWAIT | M_ZERO);
401 if (gsp->gs_init_class(sc->sc_data, gsc->gsc_priv)) {
402 free(gsc, M_GEOM_SCHED);
406 gsc->gsc_refs = 2; /* 1 for the hash table, 1 for the caller. */
408 LIST_INSERT_HEAD(bucket, gsc, gsc_clist);
410 gsc->gsc_expire = ticks + me.gs_expire_secs * hz;
412 return (gsc->gsc_priv);
416 * Release a reference to the per-client descriptor,
419 g_sched_put_class(struct g_geom *gp, void *priv)
421 struct g_sched_class *gsc;
422 struct g_sched_softc *sc;
424 gsc = g_sched_priv2class(priv);
425 gsc->gsc_expire = ticks + me.gs_expire_secs * hz;
427 if (--gsc->gsc_refs > 0)
431 sc->sc_gsched->gs_fini_class(sc->sc_data, priv);
433 LIST_REMOVE(gsc, gsc_clist);
434 free(gsc, M_GEOM_SCHED);
438 g_sched_hash_fini(struct g_geom *gp, struct g_hash *hp, u_long mask,
439 struct g_gsched *gsp, void *data)
441 struct g_sched_class *cp, *cp2;
447 if (data && gsp->gs_hash_unref)
448 gsp->gs_hash_unref(data);
450 for (i = 0; i < G_SCHED_HASH_SIZE; i++) {
451 LIST_FOREACH_SAFE(cp, &hp[i], gsc_clist, cp2)
452 g_sched_put_class(gp, cp->gsc_priv);
455 hashdestroy(hp, M_GEOM_SCHED, mask);
458 static struct g_hash *
459 g_sched_hash_init(struct g_gsched *gsp, u_long *mask, int flags)
463 if (gsp->gs_priv_size == 0)
466 hash = hashinit_flags(G_SCHED_HASH_SIZE, M_GEOM_SCHED, mask, flags);
472 g_sched_flush_classes(struct g_geom *gp)
474 struct g_sched_softc *sc;
475 struct g_sched_class *cp, *cp2;
480 if (!sc->sc_hash || ticks - sc->sc_flush_ticks <= 0)
483 for (i = 0; i < G_SCHED_HASH_SIZE; i++) {
484 LIST_FOREACH_SAFE(cp, &sc->sc_hash[i], gsc_clist, cp2) {
485 if (cp->gsc_refs == 1 && ticks - cp->gsc_expire > 0)
486 g_sched_put_class(gp, cp->gsc_priv);
490 sc->sc_flush_ticks = ticks + me.gs_expire_secs * hz;
494 * Wait for the completion of any outstanding request. To ensure
495 * that this does not take forever the caller has to make sure that
496 * no new requests enter the scheduler before calling us.
498 * Must be called with the gp mutex held and topology locked.
501 g_sched_wait_pending(struct g_geom *gp)
503 struct g_sched_softc *sc = gp->softc;
504 int endticks = ticks + hz;
508 while (sc->sc_pending && endticks - ticks >= 0)
509 msleep(gp, &sc->sc_mtx, 0, "sched_wait_pending", hz / 4);
511 return (sc->sc_pending ? ETIMEDOUT : 0);
515 g_sched_remove_locked(struct g_geom *gp, struct g_gsched *gsp)
517 struct g_sched_softc *sc = gp->softc;
520 /* Set the flushing flag: new bios will not enter the scheduler. */
521 sc->sc_flags |= G_SCHED_FLUSHING;
523 g_sched_forced_dispatch(gp);
524 error = g_sched_wait_pending(gp);
528 /* No more requests pending or in flight from the old gsp. */
530 g_sched_hash_fini(gp, sc->sc_hash, sc->sc_mask, gsp, sc->sc_data);
534 * Avoid deadlock here by releasing the gp mutex and reacquiring
535 * it once done. It should be safe, since no reconfiguration or
536 * destruction can take place due to the geom topology lock; no
537 * new request can use the current sc_data since we flagged the
538 * geom as being flushed.
541 gsp->gs_fini(sc->sc_data);
544 sc->sc_gsched = NULL;
549 sc->sc_flags &= ~G_SCHED_FLUSHING;
555 g_sched_remove(struct g_geom *gp, struct g_gsched *gsp)
560 error = g_sched_remove_locked(gp, gsp); /* gsp is surely non-null */
567 * Support function for create/taste -- locate the desired
568 * algorithm and grab a reference to it.
570 static struct g_gsched *
571 g_gsched_find(const char *name)
573 struct g_gsched *gsp = NULL;
575 mtx_lock(&me.gs_mtx);
576 LIST_FOREACH(gsp, &me.gs_scheds, glist) {
577 if (strcmp(name, gsp->gs_name) == 0) {
582 mtx_unlock(&me.gs_mtx);
588 * Rebuild the list of scheduler names.
589 * To be called with me.gs_mtx lock held.
592 g_gsched_build_names(struct g_gsched *gsp)
595 struct g_gsched *cur;
598 LIST_FOREACH(cur, &me.gs_scheds, glist) {
599 l = strlen(cur->gs_name);
600 if (l + pos + 1 + 1 < sizeof(me.gs_names)) {
602 me.gs_names[pos++] = ' ';
603 strcpy(me.gs_names + pos, cur->gs_name);
607 me.gs_names[pos] = '\0';
611 * Register or unregister individual scheduling algorithms.
614 g_gsched_register(struct g_gsched *gsp)
616 struct g_gsched *cur;
619 mtx_lock(&me.gs_mtx);
620 LIST_FOREACH(cur, &me.gs_scheds, glist) {
621 if (strcmp(gsp->gs_name, cur->gs_name) == 0)
625 G_SCHED_DEBUG(0, "A scheduler named %s already"
626 "exists.", gsp->gs_name);
629 LIST_INSERT_HEAD(&me.gs_scheds, gsp, glist);
632 g_gsched_build_names(gsp);
634 mtx_unlock(&me.gs_mtx);
639 struct g_gsched_unregparm {
640 struct g_gsched *gup_gsp;
645 g_gsched_unregister(void *arg, int flag)
647 struct g_gsched_unregparm *parm = arg;
648 struct g_gsched *gsp = parm->gup_gsp, *cur, *tmp;
649 struct g_sched_softc *sc;
650 struct g_geom *gp, *gp_tmp;
657 if (flag == EV_CANCEL)
660 mtx_lock(&me.gs_mtx);
662 LIST_FOREACH_SAFE(gp, &g_sched_class.geom, geom, gp_tmp) {
663 if (gp->class != &g_sched_class)
664 continue; /* Should not happen. */
667 if (sc->sc_gsched == gsp) {
668 error = g_sched_remove(gp, gsp);
674 LIST_FOREACH_SAFE(cur, &me.gs_scheds, glist, tmp) {
678 if (gsp->gs_refs != 1) {
679 G_SCHED_DEBUG(0, "%s still in use.",
681 parm->gup_error = EBUSY;
683 LIST_REMOVE(gsp, glist);
685 g_gsched_build_names(gsp);
691 G_SCHED_DEBUG(0, "%s not registered.", gsp->gs_name);
692 parm->gup_error = ENOENT;
696 mtx_unlock(&me.gs_mtx);
700 g_gsched_global_init(void)
703 if (!me.gs_initialized) {
704 G_SCHED_DEBUG(0, "Initializing global data.");
705 mtx_init(&me.gs_mtx, "gsched", NULL, MTX_DEF);
706 LIST_INIT(&me.gs_scheds);
707 gs_bioq_init(&me.gs_pending);
708 me.gs_initialized = 1;
713 * Module event called when a scheduling algorithm module is loaded or
717 g_gsched_modevent(module_t mod, int cmd, void *arg)
719 struct g_gsched *gsp = arg;
720 struct g_gsched_unregparm parm;
723 G_SCHED_DEBUG(0, "Modevent %d.", cmd);
726 * If the module is loaded at boot, the geom thread that calls
727 * g_sched_init() might actually run after g_gsched_modevent(),
728 * so make sure that the module is properly initialized.
730 g_gsched_global_init();
735 error = g_gsched_register(gsp);
736 G_SCHED_DEBUG(0, "Loaded module %s error %d.",
737 gsp->gs_name, error);
739 g_retaste(&g_sched_class);
746 error = g_waitfor_event(g_gsched_unregister,
747 &parm, M_WAITOK, NULL);
749 error = parm.gup_error;
750 G_SCHED_DEBUG(0, "Unloaded module %s error %d.",
751 gsp->gs_name, error);
759 #define TRC_BIO_EVENT(e, bp) g_sched_trace_bio_ ## e (bp)
762 g_sched_type(struct bio *bp)
765 if (0 != (bp->bio_cmd & BIO_READ))
767 else if (0 != (bp->bio_cmd & BIO_WRITE))
773 g_sched_trace_bio_START(struct bio *bp)
776 CTR5(KTR_GSCHED, "S %lu %c %lu/%lu %lu", g_sched_classify(bp),
777 g_sched_type(bp), bp->bio_offset / ULONG_MAX,
778 bp->bio_offset, bp->bio_length);
782 g_sched_trace_bio_DONE(struct bio *bp)
785 CTR5(KTR_GSCHED, "D %lu %c %lu/%lu %lu", g_sched_classify(bp),
786 g_sched_type(bp), bp->bio_offset / ULONG_MAX,
787 bp->bio_offset, bp->bio_length);
790 #define TRC_BIO_EVENT(e, bp)
794 * g_sched_done() and g_sched_start() dispatch the geom requests to
795 * the scheduling algorithm in use.
798 g_sched_done(struct bio *bio)
800 struct g_geom *gp = bio->bio_caller2;
801 struct g_sched_softc *sc = gp->softc;
803 TRC_BIO_EVENT(DONE, bio);
805 KASSERT(bio->bio_caller1, ("null bio_caller1 in g_sched_done"));
809 g_sched_update_stats(bio);
810 sc->sc_gsched->gs_done(sc->sc_data, bio);
811 if (!--sc->sc_pending)
814 g_sched_flush_classes(gp);
821 g_sched_start(struct bio *bp)
823 struct g_geom *gp = bp->bio_to->geom;
824 struct g_sched_softc *sc = gp->softc;
827 TRC_BIO_EVENT(START, bp);
828 G_SCHED_LOGREQ(bp, "Request received.");
830 cbp = g_clone_bio(bp);
832 g_io_deliver(bp, ENOMEM);
835 cbp->bio_done = g_sched_done;
836 cbp->bio_to = LIST_FIRST(&gp->provider);
837 KASSERT(cbp->bio_to != NULL, ("NULL provider"));
839 /* We only schedule reads and writes. */
840 if (0 == (bp->bio_cmd & (BIO_READ | BIO_WRITE)))
843 G_SCHED_LOGREQ(cbp, "Sending request.");
847 * Call the algorithm's gs_start to queue the request in the
848 * scheduler. If gs_start fails then pass the request down,
849 * otherwise call g_sched_dispatch() which tries to push
850 * one or more requests down.
852 if (!sc->sc_gsched || (sc->sc_flags & G_SCHED_FLUSHING) ||
853 sc->sc_gsched->gs_start(sc->sc_data, cbp)) {
858 * We use bio_caller1 to mark requests that are scheduled
859 * so make sure it is not NULL.
861 if (cbp->bio_caller1 == NULL)
862 cbp->bio_caller1 = &me; /* anything not NULL */
864 cbp->bio_caller2 = gp;
867 /* Update general stats. */
870 me.gs_bytes_in_flight += bp->bio_length;
871 if (bp->bio_cmd & BIO_WRITE) {
872 me.gs_writes_in_flight++;
873 me.gs_write_bytes_in_flight += bp->bio_length;
875 g_sched_dispatch(gp);
880 cbp->bio_done = g_std_done;
881 cbp->bio_caller1 = NULL; /* not scheduled */
882 g_io_request(cbp, LIST_FIRST(&gp->consumer));
886 * The next few functions are the geom glue.
889 g_sched_orphan(struct g_consumer *cp)
893 g_sched_destroy(cp->geom, 1);
897 g_sched_access(struct g_provider *pp, int dr, int dw, int de)
900 struct g_consumer *cp;
904 cp = LIST_FIRST(&gp->consumer);
905 error = g_access(cp, dr, dw, de);
911 g_sched_temporary_start(struct bio *bio)
914 mtx_lock(&me.gs_mtx);
916 gs_bioq_disksort(&me.gs_pending, bio);
917 mtx_unlock(&me.gs_mtx);
921 g_sched_flush_pending(g_start_t *start)
925 while ((bp = gs_bioq_takefirst(&me.gs_pending)))
930 g_insert_proxy(struct g_geom *gp, struct g_provider *newpp,
931 struct g_geom *dstgp, struct g_provider *pp, struct g_consumer *cp)
933 struct g_sched_softc *sc = gp->softc;
934 g_start_t *saved_start, *flush = g_sched_start;
935 int error = 0, endticks = ticks + hz;
937 g_cancel_event(newpp); /* prevent taste() */
938 /* copy private fields */
939 newpp->private = pp->private;
940 newpp->index = pp->index;
942 /* Queue all the early requests coming for us. */
944 saved_start = pp->geom->start;
945 dstgp->start = g_sched_temporary_start;
947 while (pp->nstart - pp->nend != me.gs_npending &&
948 endticks - ticks >= 0)
949 tsleep(pp, PRIBIO, "-", hz/10);
951 if (pp->nstart - pp->nend != me.gs_npending) {
957 /* link pp to this geom */
958 LIST_REMOVE(pp, provider);
960 LIST_INSERT_HEAD(&gp->provider, pp, provider);
963 * replicate the counts from the parent in the
964 * new provider and consumer nodes
966 cp->acr = newpp->acr = pp->acr;
967 cp->acw = newpp->acw = pp->acw;
968 cp->ace = newpp->ace = pp->ace;
969 sc->sc_flags |= G_SCHED_PROXYING;
972 dstgp->start = saved_start;
974 g_sched_flush_pending(flush);
980 * Create a geom node for the device passed as *pp.
981 * If successful, add a reference to this gsp.
984 g_sched_create(struct gctl_req *req, struct g_class *mp,
985 struct g_provider *pp, struct g_gsched *gsp, int proxy)
987 struct g_sched_softc *sc = NULL;
988 struct g_geom *gp, *dstgp;
989 struct g_provider *newpp = NULL;
990 struct g_consumer *cp = NULL;
996 snprintf(name, sizeof(name), "%s%s", pp->name, G_SCHED_SUFFIX);
997 LIST_FOREACH(gp, &mp->geom, geom) {
998 if (strcmp(gp->name, name) == 0) {
999 gctl_error(req, "Geom %s already exists.",
1005 gp = g_new_geomf(mp, name);
1006 dstgp = proxy ? pp->geom : gp; /* where do we link the provider */
1008 gctl_error(req, "Cannot create geom %s.", name);
1013 sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
1014 sc->sc_gsched = gsp;
1015 sc->sc_data = gsp->gs_init(gp);
1016 if (sc->sc_data == NULL) {
1021 sc->sc_hash = g_sched_hash_init(gsp, &sc->sc_mask, HASH_WAITOK);
1024 * Do not initialize the flush mechanism, will be initialized
1025 * on the first insertion on the hash table.
1028 mtx_init(&sc->sc_mtx, "g_sched_mtx", NULL, MTX_DEF);
1031 gp->start = g_sched_start;
1032 gp->orphan = g_sched_orphan;
1033 gp->access = g_sched_access;
1034 gp->dumpconf = g_sched_dumpconf;
1036 newpp = g_new_providerf(dstgp, gp->name);
1037 if (newpp == NULL) {
1038 gctl_error(req, "Cannot create provider %s.", name);
1043 newpp->mediasize = pp->mediasize;
1044 newpp->sectorsize = pp->sectorsize;
1046 cp = g_new_consumer(gp);
1048 gctl_error(req, "Cannot create consumer for %s.",
1054 error = g_attach(cp, proxy ? newpp : pp);
1056 gctl_error(req, "Cannot attach to provider %s.",
1061 g_error_provider(newpp, 0);
1063 error = g_insert_proxy(gp, newpp, dstgp, pp, cp);
1067 G_SCHED_DEBUG(0, "Device %s created.", gp->name);
1075 if (cp->provider != NULL)
1077 g_destroy_consumer(cp);
1081 g_destroy_provider(newpp);
1083 if (sc && sc->sc_hash) {
1084 g_sched_hash_fini(gp, sc->sc_hash, sc->sc_mask,
1088 if (sc && sc->sc_data)
1089 gsp->gs_fini(sc->sc_data);
1092 if (gp->softc != NULL)
1101 * Support for dynamic switching of scheduling algorithms.
1102 * First initialize the data structures for the new algorithm,
1103 * then call g_sched_remove_locked() to flush all references
1104 * to the old one, finally link the new algorithm.
1107 g_sched_change_algo(struct gctl_req *req, struct g_class *mp,
1108 struct g_provider *pp, struct g_gsched *gsp)
1110 struct g_sched_softc *sc;
1112 struct g_hash *newh;
1120 data = gsp->gs_init(gp);
1124 newh = g_sched_hash_init(gsp, &mask, HASH_WAITOK);
1125 if (gsp->gs_priv_size && !newh) {
1131 if (sc->sc_gsched) { /* can be NULL in some cases */
1132 error = g_sched_remove_locked(gp, sc->sc_gsched);
1138 sc->sc_gsched = gsp;
1149 g_sched_hash_fini(gp, newh, mask, gsp, data);
1160 * Stop the request flow directed to the proxy, redirecting the new
1161 * requests to the me.gs_pending queue.
1163 static struct g_provider *
1164 g_detach_proxy(struct g_geom *gp)
1166 struct g_consumer *cp;
1167 struct g_provider *pp, *newpp;
1170 pp = LIST_FIRST(&gp->provider);
1173 cp = LIST_FIRST(&gp->consumer);
1176 newpp = cp->provider;
1181 pp->geom->start = g_sched_temporary_start;
1185 printf("%s error detaching proxy %s\n", __FUNCTION__, gp->name);
1191 g_sched_blackhole(struct bio *bp)
1194 g_io_deliver(bp, ENXIO);
1198 g_reparent_provider(struct g_provider *pp, struct g_geom *gp,
1199 struct g_provider *newpp)
1202 LIST_REMOVE(pp, provider);
1204 pp->private = newpp->private;
1205 pp->index = newpp->index;
1208 LIST_INSERT_HEAD(&gp->provider, pp, provider);
1212 g_unproxy_provider(struct g_provider *oldpp, struct g_provider *newpp)
1214 struct g_geom *gp = oldpp->geom;
1216 g_reparent_provider(oldpp, newpp->geom, newpp);
1219 * Hackish: let the system destroy the old provider for us, just
1220 * in case someone attached a consumer to it, in which case a
1221 * direct call to g_destroy_provider() would not work.
1223 g_reparent_provider(newpp, gp, NULL);
1227 * Complete the proxy destruction, linking the old provider to its
1228 * original geom, and destroying the proxy provider. Also take care
1229 * of issuing the pending requests collected in me.gs_pending (if any).
1232 g_destroy_proxy(struct g_geom *gp, struct g_provider *oldpp)
1234 struct g_consumer *cp;
1235 struct g_provider *newpp;
1238 cp = LIST_FIRST(&gp->consumer);
1241 newpp = cp->provider;
1245 /* Relink the provider to its original geom. */
1246 g_unproxy_provider(oldpp, newpp);
1248 /* Detach consumer from provider, and destroy provider. */
1249 cp->acr = newpp->acr = 0;
1250 cp->acw = newpp->acw = 0;
1251 cp->ace = newpp->ace = 0;
1254 /* Send the pending bios through the right start function. */
1255 g_sched_flush_pending(oldpp->geom->start);
1259 printf("%s error destroying proxy %s\n", __FUNCTION__, gp->name);
1261 /* We cannot send the pending bios anywhere... */
1262 g_sched_flush_pending(g_sched_blackhole);
1268 g_sched_destroy(struct g_geom *gp, boolean_t force)
1270 struct g_provider *pp, *oldpp = NULL;
1271 struct g_sched_softc *sc;
1272 struct g_gsched *gsp;
1275 g_topology_assert();
1279 if (!(sc->sc_flags & G_SCHED_PROXYING)) {
1280 pp = LIST_FIRST(&gp->provider);
1281 if (pp && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
1282 const char *msg = force ?
1283 "but we force removal" : "cannot remove";
1285 G_SCHED_DEBUG(!force,
1286 "Device %s is still open (r%dw%de%d), %s.",
1287 pp->name, pp->acr, pp->acw, pp->ace, msg);
1291 G_SCHED_DEBUG(0, "Device %s removed.", gp->name);
1294 oldpp = g_detach_proxy(gp);
1296 gsp = sc->sc_gsched;
1299 * XXX bad hack here: force a dispatch to release
1300 * any reference to the hash table still held by
1305 * We are dying here, no new requests should enter
1306 * the scheduler. This is granted by the topology,
1307 * either in case we were proxying (new bios are
1308 * being redirected) or not (see the access check
1311 g_sched_forced_dispatch(gp);
1312 error = g_sched_wait_pending(gp);
1316 * Not all the requests came home: this might happen
1317 * under heavy load, or if we were waiting for any
1318 * bio which is served in the event path (see
1319 * geom_slice.c for an example of how this can
1320 * happen). Try to restore a working configuration
1323 if ((sc->sc_flags & G_SCHED_PROXYING) && oldpp) {
1324 g_sched_flush_pending(force ?
1325 g_sched_blackhole : g_sched_start);
1329 * In the forced destroy case there is not so much
1330 * we can do, we have pending bios that will call
1331 * g_sched_done() somehow, and we don't want them
1332 * to crash the system using freed memory. We tell
1333 * the user that something went wrong, and leak some
1335 * Note: the callers using force = 1 ignore the
1339 G_SCHED_DEBUG(0, "Pending requests while "
1340 " destroying geom, some memory leaked.");
1347 g_sched_hash_fini(gp, sc->sc_hash, sc->sc_mask,
1350 gsp->gs_fini(sc->sc_data);
1351 g_gsched_unref(gsp);
1352 sc->sc_gsched = NULL;
1355 if ((sc->sc_flags & G_SCHED_PROXYING) && oldpp) {
1356 error = g_destroy_proxy(gp, oldpp);
1360 G_SCHED_DEBUG(0, "Unrecoverable error while "
1361 "destroying a proxy geom, leaking some "
1369 mtx_destroy(&sc->sc_mtx);
1373 g_wither_geom(gp, ENXIO);
1379 g_sched_destroy_geom(struct gctl_req *req, struct g_class *mp,
1383 return (g_sched_destroy(gp, 0));
1387 * Functions related to the classification of requests.
1389 * On recent FreeBSD versions (8.0 and above), we store a reference
1390 * to the issuer of a request in bp->bio_classifier1 as soon
1391 * as the bio is posted to the geom queue (and not later, because
1392 * requests are managed by the g_down thread afterwards).
1394 * On older versions of the system (but this code is not used
1395 * in any existing release), we [ab]use the caller1 field in the
1396 * root element of the bio tree to store the classification info.
1397 * The marking is done at the beginning of g_io_request()
1398 * and only if we find that the field is NULL.
1400 * To avoid rebuilding the kernel, this module will patch the
1401 * initial part of g_io_request() so it jumps to some hand-coded
1402 * assembly that does the marking and then executes the original
1403 * body of g_io_request().
1405 * fake_ioreq[] is architecture-specific machine code
1406 * that implements the above. CODE_SIZE, STORE_SIZE etc.
1407 * are constants used in the patching routine. Look at the
1408 * code in g_ioreq_patch() for the details.
1411 #ifndef HAVE_BIO_CLASSIFIER
1413 * Support for old FreeBSD versions
1415 #if defined(__i386__)
1416 #define CODE_SIZE 29
1417 #define STORE_SIZE 5
1419 #define SIZE (CODE_SIZE + STORE_SIZE + EPILOGUE)
1421 static u_char fake_ioreq[SIZE] = {
1422 0x8b, 0x44, 0x24, 0x04, /* mov bp, %eax */
1424 0x89, 0xc2, /* mov %eax, %edx # edx = bp */
1425 0x8b, 0x40, 0x64, /* mov bp->bio_parent, %eax */
1426 0x85, 0xc0, /* test %eax, %eax */
1427 0x75, 0xf7, /* jne 1b */
1428 0x8b, 0x42, 0x30, /* mov bp->bp_caller1, %eax */
1429 0x85, 0xc0, /* test %eax, %eax */
1430 0x75, 0x09, /* jne 2f */
1431 0x64, 0xa1, 0x00, 0x00, /* mov %fs:0, %eax */
1433 0x89, 0x42, 0x30, /* mov %eax, bp->bio_caller1 */
1435 0x55, 0x89, 0xe5, 0x57, 0x56,
1436 0xe9, 0x00, 0x00, 0x00, 0x00, /* jmp back... */
1438 #elif defined(__amd64)
1439 #define CODE_SIZE 38
1440 #define STORE_SIZE 6
1442 #define SIZE (CODE_SIZE + STORE_SIZE + EPILOGUE)
1444 static u_char fake_ioreq[SIZE] = {
1445 0x48, 0x89, 0xf8, /* mov bp, %rax */
1447 0x48, 0x89, 0xc2, /* mov %rax, %rdx # rdx = bp */
1448 0x48, 0x8b, 0x82, 0xa8, /* mov bp->bio_parent, %rax */
1450 0x48, 0x85, 0xc0, /* test %rax, %rax */
1451 0x75, 0xf1, /* jne 1b */
1452 0x48, 0x83, 0x7a, 0x58, /* cmp $0, bp->bp_caller1 */
1454 0x75, 0x0d, /* jne 2f */
1455 0x65, 0x48, 0x8b, 0x04, /* mov %gs:0, %rax */
1456 0x25, 0x00, 0x00, 0x00,
1458 0x48, 0x89, 0x42, 0x58, /* mov %rax, bp->bio_caller1 */
1460 0x55, 0x48, 0x89, 0xe5, 0x41, 0x56,
1461 0xe9, 0x00, 0x00, 0x00, 0x00, /* jmp back... */
1463 #else /* neither x86 nor amd64 */
1465 g_new_io_request(struct bio *bp, struct g_consumer *cp)
1467 struct bio *top = bp;
1470 * bio classification: if bio_caller1 is available in the
1471 * root of the 'struct bio' tree, store there the thread id
1472 * of the thread that originated the request.
1473 * More sophisticated classification schemes can be used.
1475 while (top->bio_parent)
1476 top = top->bio_parent;
1478 if (top->bio_caller1 == NULL)
1479 top->bio_caller1 = curthread;
1482 #error please add the code above in g_new_io_request() to the beginning of \
1483 /sys/geom/geom_io.c::g_io_request(), and remove this line.
1484 #endif /* end of arch-specific code */
/*
 * Fragment of g_ioreq_patch() (the function header lies outside this
 * listing): install the trampoline.  It checks that the prologue of
 * g_io_request() matches the copy stored at fake_ioreq + CODE_SIZE,
 * fixes up the trampoline's trailing jmp displacement so it returns
 * past the displaced bytes, then overwrites the entry of
 * g_io_request() with a relative jmp (0xe9) into fake_ioreq.
 * NOTE(review): self-modifying kernel text; presumably serialized by
 * the caller -- verify locking/IPI requirements.
 */
1496 original = (u_char *)g_io_request;
1498 found = !bcmp(original, fake_ioreq + CODE_SIZE, STORE_SIZE);
1502 /* Jump back to the original + STORE_SIZE. */
1503 ofs = (original + STORE_SIZE) - (fake_ioreq + SIZE);
/* +1 skips the 0xe9 opcode; the 4-byte rel32 displacement follows it. */
1504 bcopy(&ofs, fake_ioreq + CODE_SIZE + STORE_SIZE + 1, 4);
1506 /* Patch the original address with a jump to the trampoline. */
1507 *original = 0xe9; /* jump opcode */
/* rel32 is relative to the end of the 5-byte jmp instruction. */
1508 ofs = fake_ioreq - (original + 5);
1509 bcopy(&ofs, original + 1, 4);
1517 * Restore the original code, this is easy.
/*
 * Undo g_ioreq_patch(): if we patched g_io_request(), copy the saved
 * prologue bytes (kept at fake_ioreq + CODE_SIZE) back over its entry.
 */
1520 g_ioreq_restore(void)
1524 if (me.gs_patched) {
1525 original = (u_char *)g_io_request;
1526 bcopy(fake_ioreq + CODE_SIZE, original, STORE_SIZE);
/*
 * Old-kernel variants of the classifier setup/teardown hooks; on this
 * path they apply/remove the binary patch above.  (Bodies are partly
 * outside this listing.)
 */
1532 g_classifier_ini(void)
1539 g_classifier_fini(void)
1545 /*--- end of support code for older FreeBSD versions */
1547 #else /* HAVE_BIO_CLASSIFIER */
1550 * Classifier support for recent FreeBSD versions: we use
1551 * a very simple classifier, only use curthread to tag a request.
1552 * The classifier is registered at module load, and unregistered
/* Classifier callback: tag each bio with the thread that issued it. */
1556 g_sched_tag(void *arg, struct bio *bp)
1559 bp->bio_classifier1 = curthread;
/* Hook descriptor handed to the GEOM classifier registration API. */
1563 static struct g_classifier_hook g_sched_classifier = {
1564 .func = g_sched_tag,
/* Register the classifier hook at module load... */
1568 g_classifier_ini(void)
1571 g_register_classifier(&g_sched_classifier);
/* ...and unregister it at unload. */
1575 g_classifier_fini(void)
1578 g_unregister_classifier(&g_sched_classifier);
1580 #endif /* HAVE_BIO_CLASSIFIER */
/*
 * GEOM class init method: set up module-global state, then arrange for
 * bio classification (body partly outside this listing).
 */
1583 g_sched_init(struct g_class *mp)
1586 g_gsched_global_init();
1588 G_SCHED_DEBUG(0, "Loading: mp = %p, g_sched_class = %p.",
1589 mp, &g_sched_class);
1591 /* Patch g_io_request to store classification info in the bio. */
/*
 * GEOM class fini method: remove the classifier and tear down global
 * state; asserts no scheduler algorithms remain registered.
 */
1596 g_sched_fini(struct g_class *mp)
1599 g_classifier_fini();
1601 G_SCHED_DEBUG(0, "Unloading...");
1603 KASSERT(LIST_EMPTY(&me.gs_scheds), ("still registered schedulers"));
1604 mtx_destroy(&me.gs_mtx);
/*
 * ioctl method: pass the request through to the underlying provider's
 * geom (the device below the scheduler), if it implements ioctl.
 * NOTE(review): intermediate NULL checks appear to be omitted from
 * this listing.
 */
1608 g_sched_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
1611 struct g_consumer *cp;
1614 cp = LIST_FIRST(&pp->geom->consumer);
1617 gp = cp->provider->geom;
1618 if (gp->ioctl == NULL)
1620 return (gp->ioctl(cp->provider, cmd, data, fflag, td));
1624 * Read the i-th argument for a request, skipping the /dev/
1625 * prefix if present.
/*
 * Fetch gctl parameter "arg<i>"; on success returns the name with any
 * leading "/dev/" stripped (return statements outside this listing),
 * otherwise records a gctl error.
 */
1628 g_sched_argi(struct gctl_req *req, int i)
1630 static const char *dev_prefix = "/dev/";
1633 int l = strlen(dev_prefix);
1635 snprintf(param, sizeof(param), "arg%d", i);
1636 name = gctl_get_asciiparam(req, param);
1638 gctl_error(req, "No 'arg%d' argument", i);
1639 else if (strncmp(name, dev_prefix, l) == 0)
1645 * Fetch nargs and do appropriate checks.
/*
 * Read the "nargs" gctl parameter and validate it; reports a gctl
 * error when it is absent or no devices were supplied.
 */
1648 g_sched_get_nargs(struct gctl_req *req)
1652 nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
1653 if (nargs == NULL) {
1654 gctl_error(req, "No 'nargs' argument");
1658 gctl_error(req, "Missing device(s).");
1663 * Check whether we should add the class on certain volumes when
1664 * this geom is created. Right now this is under control of a kenv
1665 * variable containing the names of all devices that we care about.
1666 * Probably we should only support transparent insertion as the
1667 * preferred mode of operation.
1669 static struct g_geom *
/*
 * Taste method: attach a scheduler on providers listed in the
 * space-separated kenv "geom.sched.taste", using the algorithm named
 * by kenv "geom.sched.algo" (with a default applied outside this
 * listing).
 */
1670 g_sched_taste(struct g_class *mp, struct g_provider *pp,
1673 struct g_gsched *gsp = NULL; /* the scheduling algorithm we want */
1674 const char *s; /* generic string pointer */
1675 const char *taste_names; /* devices we like */
1678 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__,
1679 mp->name, pp->name);
1680 g_topology_assert();
1682 G_SCHED_DEBUG(2, "Tasting %s.", pp->name);
1685 /* do not taste on ourselves */
1686 if (pp->geom->class == mp)
1689 taste_names = getenv("geom.sched.taste")
1690 if (taste_names == NULL)
1693 l = strlen(pp->name);
/* Scan for pp->name as a whole, space-delimited word in the list. */
1694 for (s = taste_names; *s &&
1695 (s = strstr(s, pp->name)); s++) {
1696 /* further checks for an exact match */
1697 if ( (s == taste_names || s[-1] == ' ') &&
1698 (s[l] == '\0' || s[l] == ' ') )
1703 G_SCHED_DEBUG(0, "Attach device %s match [%s]\n",
1706 /* look up the provider name in the list */
1707 s = getenv("geom.sched.algo");
1711 gsp = g_gsched_find(s); /* also get a reference */
1713 G_SCHED_DEBUG(0, "Bad '%s' algorithm.", s);
1717 /* XXX create with 1 as last argument ? */
1718 g_sched_create(NULL, mp, pp, gsp, 0);
1719 g_gsched_unref(gsp);
/*
 * gctl "create"/"insert" handler: look up the requested algorithm
 * (holding a reference) and attach a scheduler geom to each named
 * provider; proxy != 0 selects transparent insertion.
 */
1725 g_sched_ctl_create(struct gctl_req *req, struct g_class *mp, int proxy)
1727 struct g_provider *pp;
1728 struct g_gsched *gsp;
1732 g_topology_assert();
1734 name = gctl_get_asciiparam(req, "algo");
1736 gctl_error(req, "No '%s' argument", "algo");
1740 gsp = g_gsched_find(name); /* also get a reference */
1742 gctl_error(req, "Bad algorithm '%s'", name);
1746 nargs = g_sched_get_nargs(req);
1749 * Run on the arguments, and break on any error.
1750 * We look for a device name, but skip the /dev/ prefix if any.
1752 for (i = 0; i < nargs; i++) {
1753 name = g_sched_argi(req, i);
1756 pp = g_provider_by_name(name);
1758 G_SCHED_DEBUG(1, "Provider %s is invalid.", name);
1759 gctl_error(req, "Provider %s is invalid.", name);
1762 if (g_sched_create(req, mp, pp, gsp, proxy) != 0)
/* Drop the reference taken by g_gsched_find(). */
1766 g_gsched_unref(gsp);
/*
 * gctl "configure" handler: switch the scheduling algorithm of each
 * named provider (which must already belong to this class) to the one
 * requested in the "algo" parameter.
 */
1770 g_sched_ctl_configure(struct gctl_req *req, struct g_class *mp)
1772 struct g_provider *pp;
1773 struct g_gsched *gsp;
1777 g_topology_assert();
1779 name = gctl_get_asciiparam(req, "algo");
1781 gctl_error(req, "No '%s' argument", "algo");
1785 gsp = g_gsched_find(name); /* also get a reference */
1787 gctl_error(req, "Bad algorithm '%s'", name);
1791 nargs = g_sched_get_nargs(req);
1794 * Run on the arguments, and break on any error.
1795 * We look for a device name, but skip the /dev/ prefix if any.
1797 for (i = 0; i < nargs; i++) {
1798 name = g_sched_argi(req, i);
1801 pp = g_provider_by_name(name);
/* Only providers created by this class can be reconfigured. */
1802 if (pp == NULL || pp->geom->class != mp) {
1803 G_SCHED_DEBUG(1, "Provider %s is invalid.", name);
1804 gctl_error(req, "Provider %s is invalid.", name);
1807 if (g_sched_change_algo(req, mp, pp, gsp) != 0)
/* Drop the reference taken by g_gsched_find(). */
1811 g_gsched_unref(gsp);
1814 static struct g_geom *
/* Linear lookup of a geom by name within this class; NULL if absent. */
1815 g_sched_find_geom(struct g_class *mp, const char *name)
1819 LIST_FOREACH(gp, &mp->geom, geom) {
1820 if (strcmp(gp->name, name) == 0)
/*
 * gctl "destroy" handler: tear down the scheduler geom for each named
 * device; "force" propagates to g_sched_destroy() to override refusal.
 */
1827 g_sched_ctl_destroy(struct gctl_req *req, struct g_class *mp)
1829 int nargs, *force, error, i;
1833 g_topology_assert();
1835 nargs = g_sched_get_nargs(req);
1837 force = gctl_get_paraml(req, "force", sizeof(*force));
1838 if (force == NULL) {
1839 gctl_error(req, "No 'force' argument");
1843 for (i = 0; i < nargs; i++) {
1844 name = g_sched_argi(req, i);
1848 gp = g_sched_find_geom(mp, name);
1850 G_SCHED_DEBUG(1, "Device %s is invalid.", name);
1851 gctl_error(req, "Device %s is invalid.", name);
1855 error = g_sched_destroy(gp, *force);
1857 gctl_error(req, "Cannot destroy device %s (error=%d).",
/*
 * Top-level gctl dispatcher: validate the userland/kernel interface
 * version, then route the verb to the matching ctl handler.
 */
1865 g_sched_config(struct gctl_req *req, struct g_class *mp, const char *verb)
1869 g_topology_assert();
1871 version = gctl_get_paraml(req, "version", sizeof(*version));
1872 if (version == NULL) {
1873 gctl_error(req, "No '%s' argument.", "version");
1877 if (*version != G_SCHED_VERSION) {
1878 gctl_error(req, "Userland and kernel parts are "
1883 if (strcmp(verb, "create") == 0) {
1884 g_sched_ctl_create(req, mp, 0);
1886 } else if (strcmp(verb, "insert") == 0) {
1887 g_sched_ctl_create(req, mp, 1);
1889 } else if (strcmp(verb, "configure") == 0) {
1890 g_sched_ctl_configure(req, mp);
1892 } else if (strcmp(verb, "destroy") == 0) {
1893 g_sched_ctl_destroy(req, mp);
1897 gctl_error(req, "Unknown verb.");
/*
 * dumpconf method: report the active algorithm name (plaintext mode),
 * then delegate to the algorithm's own dumpconf hook if it has one.
 */
1901 g_sched_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1902 struct g_consumer *cp, struct g_provider *pp)
1904 struct g_sched_softc *sc = gp->softc;
1905 struct g_gsched *gsp = sc->sc_gsched;
1906 if (indent == NULL) { /* plaintext */
1907 sbuf_printf(sb, " algo %s", gsp ? gsp->gs_name : "--");
1909 if (gsp != NULL && gsp->gs_dumpconf)
1910 gsp->gs_dumpconf(sb, indent, gp, cp, pp);
/* Register the class with GEOM and declare the kernel module version. */
1913 DECLARE_GEOM_CLASS(g_sched_class, g_sched);
1914 MODULE_VERSION(geom_sched, 0);