/*-
 * Copyright (c) 2009-2010 Fabio Checconi
 * Copyright (c) 2009-2010 Luigi Rizzo, Universita` di Pisa
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A round-robin (RR) anticipatory scheduler, with per-client queues.
 *
 * The goal of this implementation is to improve throughput compared
 * to the pure elevator algorithm, and ensure some fairness among
 * clients.
 *
 * Requests coming from the same client are put in the same queue.
 * We use anticipation to help reduce seeks, and each queue
 * is never served continuously for more than a given amount of
 * time or data. Queues are then served in a round-robin fashion.
 *
 * Each queue can be in any of the following states:
 *	READY	immediately serve the first pending request;
 *	BUSY	one request is under service, wait for completion;
 *	IDLING	do not serve incoming requests immediately, unless
 *		they are "eligible" as defined later.
 *
 * Scheduling is made looking at the status of all queues,
 * and the first one in round-robin order is privileged.
 */
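
/*
 * Life of a request through this module, as implemented below:
 *	g_rr_start()		classify the bio and enqueue it on the
 *				per-client queue;
 *	g_rr_next()		pick the next bio to dispatch, charge its
 *				length to the active queue and decide
 *				whether to anticipate further requests;
 *	g_rr_done()		on completion, possibly arm the
 *				anticipation timer;
 *	g_rr_wait_timeout()	give up anticipation if no new request
 *				arrived in time.
 */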

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include "gs_scheduler.h"

/* Possible states of each queue. */
enum g_rr_state {
	G_QUEUE_READY = 0,	/* Ready to dispatch. */
	G_QUEUE_BUSY,		/* Waiting for a completion. */
	G_QUEUE_IDLING		/* Waiting for a new request. */
};

/* Possible queue flags. */
enum g_rr_flags {
	G_FLAG_COMPLETED = 1,	/* Completed a req. in the current budget. */
};
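
/*
 * G_FLAG_COMPLETED is set once at least one request has completed
 * within the current slice.  The time-based expiration test in
 * g_rr_queue_expired() only applies after that, so the time spent
 * reaching the client's first request is not charged to the queue.
 */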

/*
 * Queue descriptor, containing reference count, scheduling
 * state, a queue of pending requests, configuration parameters.
 * Queues with pending request(s) and not under service are also
 * stored in a Round Robin (RR) list.
 */
struct g_rr_queue {
	struct g_rr_softc *q_sc;	/* link to the parent */

	enum g_rr_state	q_status;
	unsigned int	q_service;	/* service received so far */
	int		q_slice_end;	/* actual slice end in ticks */
	enum g_rr_flags	q_flags;	/* queue flags */
	struct bio_queue_head q_bioq;

	/* Scheduling parameters */
	unsigned int	q_budget;	/* slice size in bytes */
	unsigned int	q_slice_duration; /* slice size in ticks */
	unsigned int	q_wait_ticks;	/* wait time for anticipation */

	/* Stats to drive the various heuristics. */
	struct g_savg	q_thinktime;	/* Thinktime average. */
	struct g_savg	q_seekdist;	/* Seek distance average. */

	int		q_bionum;	/* Number of requests. */

	off_t		q_lastoff;	/* Last submitted req. offset. */
	int		q_lastsub;	/* Last submitted req. time. */

	/* Expiration deadline for an empty queue. */
	int		q_expire;

	TAILQ_ENTRY(g_rr_queue) q_tailq;	/* RR list link field */
};

/* List types. */
TAILQ_HEAD(g_rr_tailq, g_rr_queue);

/* List of scheduler instances. */
LIST_HEAD(g_scheds, g_rr_softc);

/* Default quantum for RR between queues. */
#define	G_RR_DEFAULT_BUDGET	0x00800000
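/* (0x00800000 bytes = 8 MiB, the same value as the 8192 KB quantum_kb default below.) */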

/*
 * Per device descriptor, holding the Round Robin list of queues
 * accessing the disk, a reference to the geom, and the timer.
 */
struct g_rr_softc {
	struct g_geom	*sc_geom;

	/*
	 * sc_active is the queue we are anticipating for.
	 * It is set only in g_rr_next(), and possibly cleared
	 * only in g_rr_next() or on a timeout.
	 * The active queue is never in the Round Robin list
	 * even if it has requests queued.
	 */
	struct g_rr_queue *sc_active;
	struct callout	sc_wait;	/* timer for sc_active */

	struct g_rr_tailq sc_rr_tailq;	/* the round-robin list */
	int		sc_nqueues;	/* number of queues */

	/* Statistics */
	int		sc_in_flight;	/* requests in the driver */

	LIST_ENTRY(g_rr_softc)	sc_next;
};

/* Descriptor for bounded values, min and max are constant. */
struct x_bound {
	const int	x_min;
	int		x_cur;
	const int	x_max;
};

/* Parameters, configuration and statistics. */
struct g_rr_params {
	int	queues;			/* total number of queues */
	int	w_anticipate;		/* anticipate writes */
	int	bypass;			/* bypass the scheduler */

	int	units;			/* how many instances */
	/* sc_head is used for debugging */
	struct g_scheds	sc_head;	/* first scheduler instance */

	struct x_bound queue_depth;	/* max parallel requests */
	struct x_bound wait_ms;		/* wait time, milliseconds */
	struct x_bound quantum_ms;	/* quantum size, milliseconds */
	struct x_bound quantum_kb;	/* quantum size, Kb (1024 bytes) */

	/* Statistics. */
	int	wait_hit;		/* success in anticipation */
	int	wait_miss;		/* failure in anticipation */
};

/*
 * Default parameters for the scheduler.  The quantum sizes target
 * an 80 MB/s disk; if the hardware is faster or slower the minimum of
 * the two will have effect: the clients will still be isolated but
 * the fairness may be limited.  (With the defaults below, 100 ms at
 * 80 MB/s is about 8 MB, which matches the 8192 KB byte quantum.)
 * A complete solution would involve the on-line measurement of the
 * actual disk throughput to derive these parameters.  Or we may just
 * choose to ignore service domain fairness and accept what can be
 * achieved with time-only budgets.
 */
static struct g_rr_params me = {
	.sc_head = LIST_HEAD_INITIALIZER(&me.sc_head),
	.w_anticipate = 1,
	.queue_depth =	{ 1,	1,	50 },
	.wait_ms =	{ 1,	10,	30 },
	.quantum_ms =	{ 1,	100,	500 },
	.quantum_kb =	{ 16,	8192,	65536 },
};

struct g_rr_params *gs_rr_me = &me;

SYSCTL_DECL(_kern_geom_sched);
SYSCTL_NODE(_kern_geom_sched, OID_AUTO, rr, CTLFLAG_RW, 0,
    "GEOM_SCHED ROUND ROBIN stuff");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, units, CTLFLAG_RD,
    &me.units, 0, "Scheduler instances");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, queues, CTLFLAG_RD,
    &me.queues, 0, "Total rr queues");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, wait_ms, CTLFLAG_RW,
    &me.wait_ms.x_cur, 0, "Wait time milliseconds");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, quantum_ms, CTLFLAG_RW,
    &me.quantum_ms.x_cur, 0, "Quantum size milliseconds");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, bypass, CTLFLAG_RW,
    &me.bypass, 0, "Bypass scheduler");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, w_anticipate, CTLFLAG_RW,
    &me.w_anticipate, 0, "Do anticipation on writes");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, quantum_kb, CTLFLAG_RW,
    &me.quantum_kb.x_cur, 0, "Quantum size Kbytes");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, queue_depth, CTLFLAG_RW,
    &me.queue_depth.x_cur, 0, "Maximum simultaneous requests");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, wait_hit, CTLFLAG_RW,
    &me.wait_hit, 0, "Hits in anticipation");
SYSCTL_UINT(_kern_geom_sched_rr, OID_AUTO, wait_miss, CTLFLAG_RW,
    &me.wait_miss, 0, "Misses in anticipation");
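
/*
 * Example (from userland, once this scheduler module is loaded;
 * the values below are only an illustration):
 *	sysctl kern.geom.sched.rr.wait_ms=15
 *	sysctl kern.geom.sched.rr.quantum_kb=4096
 */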

#ifdef DEBUG_QUEUES
/* Print the status of a queue. */
static void
gs_rr_dump_q(struct g_rr_queue *qp, int index)
{
	struct bio *bp;
	int l = 0;

	TAILQ_FOREACH(bp, &(qp->q_bioq.queue), bio_queue)
		l++;
	printf("--- rr queue %d %p status %d len %d ---\n",
	    index, qp, qp->q_status, l);
}

/*
 * Dump the scheduler status when writing to this sysctl variable.
 * XXX right now we only dump the status of the last instance created.
 * not a severe issue because this is only for debugging
 */
static int
gs_rr_sysctl_status(SYSCTL_HANDLER_ARGS)
{
	int error, val = 0;
	struct g_rr_softc *sc;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	printf("called %s\n", __FUNCTION__);

	LIST_FOREACH(sc, &me.sc_head, sc_next) {
		struct g_rr_queue *qp;
		int tot = 0;

		printf("--- sc %p active %p nqueues %d "
		    "callout %d in_flight %d ---\n",
		    sc, sc->sc_active, sc->sc_nqueues,
		    callout_active(&sc->sc_wait),
		    sc->sc_in_flight);
		/* Dump the queues reachable from this instance. */
		TAILQ_FOREACH(qp, &sc->sc_rr_tailq, q_tailq)
			gs_rr_dump_q(qp, tot++);
		if (sc->sc_active != NULL)
			gs_rr_dump_q(sc->sc_active, tot++);
	}
	return (0);
}

SYSCTL_PROC(_kern_geom_sched_rr, OID_AUTO, status,
    CTLTYPE_UINT | CTLFLAG_RW,
    0, sizeof(int), gs_rr_sysctl_status, "I", "status");
#endif	/* DEBUG_QUEUES */

/*
 * Get a bounded value, optionally convert to a min of t_min ticks.
 */
static int
get_bounded(struct x_bound *v, int t_min)
{
	int x;

	x = v->x_cur;
	if (x < v->x_min)
		x = v->x_min;
	else if (x > v->x_max)
		x = v->x_max;
	if (t_min) {
		x = x * hz / 1000;	/* convert to ticks */
		if (x < t_min)
			x = t_min;
	}
	return (x);
}

/*
 * Get a reference to the queue for bp, using the generic
 * classification mechanism.
 */
static struct g_rr_queue *
g_rr_queue_get(struct g_rr_softc *sc, struct bio *bp)
{

	return (g_sched_get_class(sc->sc_geom, bp));
}

static int
g_rr_init_class(void *data, void *priv)
{
	struct g_rr_softc *sc = data;
	struct g_rr_queue *qp = priv;

	gs_bioq_init(&qp->q_bioq);

	/*
	 * Set the initial parameters for the client:
	 * slice size in bytes and ticks, and wait ticks.
	 * Right now these are constant, but we could have
	 * autoconfiguration code to adjust the values based on
	 * the actual workload.
	 */
	qp->q_budget = 1024 * get_bounded(&me.quantum_kb, 0);
	qp->q_slice_duration = get_bounded(&me.quantum_ms, 2);
	qp->q_wait_ticks = get_bounded(&me.wait_ms, 2);

	qp->q_sc = sc;		/* link to the parent */
	qp->q_sc->sc_nqueues++;
	me.queues++;

	return (0);
}

/*
 * Release a reference to the queue.
 */
static void
g_rr_queue_put(struct g_rr_queue *qp)
{

	g_sched_put_class(qp->q_sc->sc_geom, qp);
}

static void
g_rr_fini_class(void *data, void *priv)
{
	struct g_rr_queue *qp = priv;

	KASSERT(gs_bioq_first(&qp->q_bioq) == NULL,
	    ("released nonempty queue"));
	qp->q_sc->sc_nqueues--;
	me.queues--;
}
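
/*
 * Return non-zero when the queue has exhausted its slice: either it
 * has consumed its byte budget, or a request already completed in
 * this slice and the time allotted has elapsed.
 */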
static inline int
g_rr_queue_expired(struct g_rr_queue *qp)
{

	if (qp->q_service >= qp->q_budget)
		return (1);

	if ((qp->q_flags & G_FLAG_COMPLETED) &&
	    ticks - qp->q_slice_end >= 0)
		return (1);

	return (0);
}
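
/*
 * Decide whether it is worth idling (anticipating) for the next
 * request from this client: only if writes are allowed to anticipate
 * (or the request is a read), and only if the client's recent think
 * time and seek distance averages are small.
 */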
static inline int
g_rr_should_anticipate(struct g_rr_queue *qp, struct bio *bp)
{
	int wait = get_bounded(&me.wait_ms, 2);

	if (!me.w_anticipate && (bp->bio_cmd & BIO_WRITE))
		return (0);

	if (g_savg_valid(&qp->q_thinktime) &&
	    g_savg_read(&qp->q_thinktime) > wait)
		return (0);

	if (g_savg_valid(&qp->q_seekdist) &&
	    g_savg_read(&qp->q_seekdist) > 8192)
		return (0);

	return (1);
}

/*
 * Called on a request arrival, timeout or completion.
 * Try to serve a request among those queued.
 */
static struct bio *
g_rr_next(void *data, int force)
{
	struct g_rr_softc *sc = data;
	struct g_rr_queue *qp;
	struct bio *bp, *next;
	int expired;

	qp = sc->sc_active;
	if (me.bypass == 0 && !force) {
		if (sc->sc_in_flight >= get_bounded(&me.queue_depth, 0))
			return (NULL);

		/* Try with the queue under service first. */
		if (qp != NULL && qp->q_status != G_QUEUE_READY) {
			/*
			 * Queue is anticipating, ignore request.
			 * We should check that we are not past
			 * the timeout, but in that case the timeout
			 * will fire immediately afterwards so we
			 * don't bother.
			 */
			return (NULL);
		}
	} else if (qp != NULL && qp->q_status != G_QUEUE_READY) {
		g_rr_queue_put(qp);
		sc->sc_active = qp = NULL;
	}

	/*
	 * No queue under service, look for the first in RR order.
	 * If we find it, select it as sc_active, clear service
	 * and record the end time of the slice.
	 */
	if (qp == NULL) {
		qp = TAILQ_FIRST(&sc->sc_rr_tailq);
		if (qp == NULL)
			return (NULL);	/* no queues at all, return */
		/* Otherwise select the new queue for service. */
		TAILQ_REMOVE(&sc->sc_rr_tailq, qp, q_tailq);
		sc->sc_active = qp;
		qp->q_service = 0;
		qp->q_flags &= ~G_FLAG_COMPLETED;
	}

	bp = gs_bioq_takefirst(&qp->q_bioq);	/* surely not NULL */
	qp->q_service += bp->bio_length;	/* charge the service */

	/*
	 * The request at the head of the active queue is always
	 * dispatched, and g_rr_next() will be called again
	 * immediately.
	 * We need to prepare for what to do next:
	 *
	 * 1. have we reached the end of the (time or service) slice ?
	 *    If so, clear sc_active and possibly requeue the previous
	 *    active queue if it has more requests pending;
	 * 2. do we have more requests in sc_active ?
	 *    If yes, do not anticipate, as g_rr_next() will run again;
	 *    if no, decide whether or not to anticipate depending
	 *    on reads or writes (e.g., anticipate only on reads).
	 */
	expired = g_rr_queue_expired(qp);	/* are we expired ? */
	next = gs_bioq_first(&qp->q_bioq);	/* do we have one more ? */
	if (expired) {
		sc->sc_active = NULL;
		/* Either requeue or release reference. */
		if (next != NULL)
			TAILQ_INSERT_TAIL(&sc->sc_rr_tailq, qp, q_tailq);
		else
			g_rr_queue_put(qp);
	} else if (next != NULL) {
		qp->q_status = G_QUEUE_READY;
	} else {
		if (!force && g_rr_should_anticipate(qp, bp)) {
			/* Anticipate. */
			qp->q_status = G_QUEUE_BUSY;
		} else {
			/* Do not anticipate, release reference. */
			g_rr_queue_put(qp);
			sc->sc_active = NULL;
		}
	}
	/* If sc_active != NULL, its q_status is always correct. */

	sc->sc_in_flight++;

	return (bp);
}
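
/*
 * Update the moving average of the client's "think time", i.e. the
 * number of ticks between consecutive submissions, clamped to twice
 * the anticipation wait.  Only meaningful while the queue is active.
 */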
static void
g_rr_update_thinktime(struct g_rr_queue *qp)
{
	int delta = ticks - qp->q_lastsub, wait = get_bounded(&me.wait_ms, 2);

	if (qp->q_sc->sc_active != qp)
		return;

	qp->q_lastsub = ticks;
	delta = (delta > 2 * wait) ? 2 * wait : delta;
	if (qp->q_bionum > 7)
		g_savg_add_sample(&qp->q_thinktime, delta);
}
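
/*
 * Update the moving average of the seek distance between consecutive
 * requests from this client, capped at 64 KiB (8192 * 8) so that a
 * single large jump does not dominate the average.
 */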
static void
g_rr_update_seekdist(struct g_rr_queue *qp, struct bio *bp)
{
	off_t dist;

	if (qp->q_lastoff > bp->bio_offset)
		dist = qp->q_lastoff - bp->bio_offset;
	else
		dist = bp->bio_offset - qp->q_lastoff;

	if (dist > (8192 * 8))
		dist = 8192 * 8;

	qp->q_lastoff = bp->bio_offset + bp->bio_length;

	if (qp->q_bionum > 7)
		g_savg_add_sample(&qp->q_seekdist, dist);
}

/*
 * Called when a real request for disk I/O arrives.
 * Locate the queue associated with the client.
 * If the queue is the one we are anticipating for, reset its timeout;
 * if the queue is not in the round robin list, insert it in the list.
 * On any error, do not queue the request and return -1, the caller
 * will take care of this request.
 */
static int
g_rr_start(void *data, struct bio *bp)
{
	struct g_rr_softc *sc = data;
	struct g_rr_queue *qp;

	if (me.bypass)
		return (-1);	/* bypass the scheduler */

	/* Get the queue for the request. */
	qp = g_rr_queue_get(sc, bp);
	if (qp == NULL)
		return (-1);	/* allocation failed, tell upstream */

	if (gs_bioq_first(&qp->q_bioq) == NULL) {
		/*
		 * We are inserting into an empty queue.
		 * Reset its state if it is sc_active,
		 * otherwise insert it in the RR list.
		 */
		if (qp == sc->sc_active) {
			qp->q_status = G_QUEUE_READY;
			callout_stop(&sc->sc_wait);
		} else {
			g_sched_priv_ref(qp);
			TAILQ_INSERT_TAIL(&sc->sc_rr_tailq, qp, q_tailq);
		}
	}

	/* Crude, saturating count of recent requests; the statistics
	 * above are only updated once it exceeds 7. */
	qp->q_bionum = 1 + qp->q_bionum - (qp->q_bionum >> 3);

	g_rr_update_thinktime(qp);
	g_rr_update_seekdist(qp, bp);

	/* Inherit the reference returned by g_rr_queue_get(). */
	bp->bio_caller1 = qp;
	gs_bioq_disksort(&qp->q_bioq, bp);

	return (0);
}

/*
 * Callout executed when a queue times out anticipating a new request.
 */
static void
g_rr_wait_timeout(void *data)
{
	struct g_rr_softc *sc = data;
	struct g_geom *geom = sc->sc_geom;

	g_sched_lock(geom);
	/*
	 * We can race with other events, so check if
	 * sc_active is still valid.
	 */
	if (sc->sc_active != NULL) {
		/* Release the reference to the queue. */
		g_rr_queue_put(sc->sc_active);
		sc->sc_active = NULL;
		me.wait_hit--;
		me.wait_miss++;	/* record the miss */
	}
	g_sched_dispatch(geom);
	g_sched_unlock(geom);
}

/*
 * Module glue: allocate descriptor, initialize its fields.
 */
static void *
g_rr_init(struct g_geom *geom)
{
	struct g_rr_softc *sc;

	/* XXX check whether we can sleep */
	sc = malloc(sizeof *sc, M_GEOM_SCHED, M_NOWAIT | M_ZERO);
	sc->sc_geom = geom;
	TAILQ_INIT(&sc->sc_rr_tailq);
	callout_init(&sc->sc_wait, CALLOUT_MPSAFE);
	LIST_INSERT_HEAD(&me.sc_head, sc, sc_next);
	me.units++;

	return (sc);
}

/*
 * Module glue -- drain the callout structure, unlink the
 * descriptor from the instance list, and free it.
 */
static void
g_rr_fini(void *data)
{
	struct g_rr_softc *sc = data;

	callout_drain(&sc->sc_wait);
	KASSERT(sc->sc_active == NULL, ("still a queue under service"));
	KASSERT(TAILQ_EMPTY(&sc->sc_rr_tailq), ("still scheduled queues"));

	LIST_REMOVE(sc, sc_next);
	me.units--;
	free(sc, M_GEOM_SCHED);
}

/*
 * Called when the request under service terminates.
 * Start the anticipation timer if needed.
 */
static void
g_rr_done(void *data, struct bio *bp)
{
	struct g_rr_softc *sc = data;
	struct g_rr_queue *qp;

	sc->sc_in_flight--;

	qp = bp->bio_caller1;
	if (qp == sc->sc_active && qp->q_status == G_QUEUE_BUSY) {
		if (!(qp->q_flags & G_FLAG_COMPLETED)) {
			qp->q_flags |= G_FLAG_COMPLETED;
			/* In case we want to make the slice adaptive. */
			qp->q_slice_duration = get_bounded(&me.quantum_ms, 2);
			qp->q_slice_end = ticks + qp->q_slice_duration;
		}

		/* The queue is trying anticipation, start the timer. */
		qp->q_status = G_QUEUE_IDLING;
		/* May make this adaptive. */
		qp->q_wait_ticks = get_bounded(&me.wait_ms, 2);
		me.wait_hit++;
		callout_reset(&sc->sc_wait, qp->q_wait_ticks,
		    g_rr_wait_timeout, sc);
	} else
		g_sched_dispatch(sc->sc_geom);

	/* Release a reference to the queue. */
	g_rr_queue_put(qp);
}
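
/*
 * gs_dumpconf hook: report the number of instances and queues.
 * Only the plaintext form (indent == NULL) is implemented.
 */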
static void
g_rr_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{

	if (indent == NULL) {	/* plaintext */
		sbuf_printf(sb, " units %d queues %d",
		    me.units, me.queues);
	}
}
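
/*
 * Method table exported to the g_sched framework.  gs_priv_size is
 * the size of the per-client private data (our struct g_rr_queue)
 * allocated by the framework for each class; g_rr_init_class() and
 * g_rr_fini_class() above initialize and tear down that area.
 */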
static struct g_gsched g_rr = {
	.gs_name = "rr",
	.gs_priv_size = sizeof(struct g_rr_queue),
	.gs_init = g_rr_init,
	.gs_fini = g_rr_fini,
	.gs_start = g_rr_start,
	.gs_done = g_rr_done,
	.gs_next = g_rr_next,
	.gs_dumpconf = g_rr_dumpconf,
	.gs_init_class = g_rr_init_class,
	.gs_fini_class = g_rr_fini_class,
};

DECLARE_GSCHED_MODULE(rr, &g_rr);