2 * Copyright (C) 1998-2003
3 * Sony Computer Science Laboratories Inc. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 1990-1994 Regents of the University of California.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
38 * 3. All advertising materials mentioning features or use of this software
39 * must display the following acknowledgement:
40 * This product includes software developed by the Computer Systems
41 * Engineering Group at Lawrence Berkeley Laboratory.
42 * 4. Neither the name of the University nor of the Laboratory may be used
43 * to endorse or promote products derived from this software without
44 * specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * $KAME: altq_rio.c,v 1.17 2003/07/10 12:07:49 kjc Exp $
64 #include "opt_inet6.h"
65 #ifdef ALTQ_RIO /* rio is enabled by ALTQ_RIO option in opt_altq.h */
67 #include <sys/param.h>
68 #include <sys/malloc.h>
70 #include <sys/socket.h>
71 #include <sys/systm.h>
72 #include <sys/errno.h>
73 #if 1 /* ALTQ3_COMPAT */
75 #include <sys/sockio.h>
76 #include <sys/kernel.h>
80 #include <net/if_var.h>
82 #include <netinet/in.h>
83 #include <netinet/in_systm.h>
84 #include <netinet/ip.h>
86 #include <netinet/ip6.h>
89 #include <netpfil/pf/pf.h>
90 #include <netpfil/pf/pf_altq.h>
91 #include <net/altq/altq.h>
92 #include <net/altq/altq_cdnr.h>
93 #include <net/altq/altq_red.h>
94 #include <net/altq/altq_rio.h>
96 #include <net/altq/altq_conf.h>
100 * RIO: RED with IN/OUT bit
102 * "Explicit Allocation of Best Effort Packet Delivery Service"
103 * David D. Clark and Wenjia Fang, MIT Lab for Computer Science
104 * http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
106 * this implementation is extended to support more than 2 drop precedence
107 * values as described in RFC2597 (Assured Forwarding PHB Group).
111 * AF DS (differentiated service) codepoints.
112 * (classes can be mapped to CBQ or H-FSC classes.)
115 * +---+---+---+---+---+---+---+---+
116 * | CLASS |DropPre| 0 | CU |
117 * +---+---+---+---+---+---+---+---+
125 * medium drop prec: 10
/* normal red parameters */
#define	W_WEIGHT	512	/* inverse of weight of EWMA (511/512) */
				/* q_weight = 0.00195 */

/* red parameters for a slow link */
#define	W_WEIGHT_1	128	/* inverse of weight of EWMA (127/128) */
				/* q_weight = 0.0078125 */

/* red parameters for a very slow link (e.g., dialup) */
#define	W_WEIGHT_2	64	/* inverse of weight of EWMA (63/64) */
				/* q_weight = 0.015625 */

/* fixed-point uses 12-bit decimal places */
#define	FP_SHIFT	12	/* fixed-point shift */

/* red parameters for drop probability */
#define	INV_P_MAX	10	/* inverse of max drop probability */
#define	TH_MIN		5	/* min threshold */
#define	TH_MAX		15	/* max threshold */

#define	RIO_LIMIT	60	/* default max queue length */
#define	RIO_STATS		/* collect statistics */

/*
 * TV_DELTA(a, b, delta): compute delta = (a) - (b) in microseconds,
 * where a and b are struct timeval.
 * NOTE(review): this extraction is missing several lines of the macro
 * body (the declaration of xxs, the branch bodies, and the closing
 * braces) -- the fragment below is NOT compilable as-is and must be
 * restored from the original altq sources before use.
 */
#define TV_DELTA(a, b, delta) { \
	delta = (a)->tv_usec - (b)->tv_usec; \
	if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) { \
	} else if (xxs > 4) { \
		delta += xxs * 1000000; \
	} else while (xxs > 0) { \
172 /* rio_list keeps all rio_queue_t's allocated. */
173 static rio_queue_t *rio_list = NULL;
175 /* default rio parameter values */
176 static struct redparams default_rio_params[RIO_NDROPPREC] = {
177 /* th_min, th_max, inv_pmax */
178 { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
179 { TH_MAX + TH_MIN, TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
180 { TH_MIN, TH_MAX, INV_P_MAX } /* high drop precedence */
/* internal function prototypes */
/* map the drop-precedence bits of a DSCP value to a precstate index */
static int dscp2index(u_int8_t);
/* ifaltq enqueue/dequeue/request hooks installed via altq_attach() */
static int rio_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
static struct mbuf *rio_dequeue(struct ifaltq *, int);
static int rio_request(struct ifaltq *, int, void *);
/* detach a rio_queue_t from its interface and release it */
static int rio_detach(rio_queue_t *);
192 * rio device interface
196 #endif /* ALTQ3_COMPAT */
/*
 * Allocate and initialize a rio_t state block.
 *
 * weight:  inverse EWMA weight; 0 selects a default scaled to link speed.
 * params:  per-precedence RED parameters; NULL (or zero fields) selects
 *          entries from default_rio_params[].
 * flags:   RIOF_* behavior flags.
 * pkttime: average packet transmission time in usec; 0 selects a default.
 *
 * NOTE(review): this extraction is missing many interior lines (the
 * "rio_t *" return-type line, the opening brace, local declarations,
 * the malloc NULL check, several "else" arms, closing braces and the
 * final "return (rp);").  The code below is kept verbatim; restore the
 * missing lines from the original altq sources before compiling.
 */
rio_alloc(int weight, struct redparams *params, int flags, int pkttime)
	rp = malloc(sizeof(rio_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	rp->rio_flags = flags;
	/* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
	rp->rio_pkttime = 800;
		rp->rio_pkttime = pkttime;
	rp->rio_weight = weight;
	rp->rio_weight = W_WEIGHT;
	/* when the link is very slow, adjust red parameters */
	npkts_per_sec = 1000000 / rp->rio_pkttime;
	if (npkts_per_sec < 50) {
		/* up to about 400Kbps */
		rp->rio_weight = W_WEIGHT_2;
	} else if (npkts_per_sec < 300) {
		/* up to about 2.4Mbps */
		rp->rio_weight = W_WEIGHT_1;
	/* calculate wshift. weight must be power of 2 */
	for (i = 0; w > 1; i++)
	w = 1 << rp->rio_wshift;
	if (w != rp->rio_weight) {
		printf("invalid weight value %d for red! use %d\n",
	/* allocate weight table */
	rp->rio_wtab = wtab_alloc(rp->rio_weight);
	/* initialize per-drop-precedence RED state */
	for (i = 0; i < RIO_NDROPPREC; i++) {
		struct dropprec_state *prec = &rp->rio_precstate[i];
		/* fall back to the defaults for any unset parameter */
		if (params == NULL || params[i].inv_pmax == 0)
			prec->inv_pmax = default_rio_params[i].inv_pmax;
			prec->inv_pmax = params[i].inv_pmax;
		if (params == NULL || params[i].th_min == 0)
			prec->th_min = default_rio_params[i].th_min;
			prec->th_min = params[i].th_min;
		if (params == NULL || params[i].th_max == 0)
			prec->th_max = default_rio_params[i].th_max;
			prec->th_max = params[i].th_max;
		/*
		 * th_min_s and th_max_s are scaled versions of th_min
		 * and th_max to be compared with avg.
		 */
		prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
		prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);
		/*
		 * precompute probability denominator
		 * probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
		 */
		prec->probd = (2 * (prec->th_max - prec->th_min)
		    * prec->inv_pmax) << FP_SHIFT;
		/* timestamp for idle-period avg decay in rio_addq() */
		microtime(&prec->last);
/*
 * Release a rio_t allocated by rio_alloc().
 * NOTE(review): only the signature and the weight-table release are
 * visible in this extraction; the return-type line, braces, and the
 * final free of rp itself are missing -- restore before compiling.
 */
rio_destroy(rio_t *rp)
	wtab_destroy(rp->rio_wtab);
/*
 * Copy per-drop-precedence RED statistics into caller-supplied storage.
 * q_avg is descaled from the fixed-point EWMA representation.
 * NOTE(review): truncated extraction -- presumably sp advances to the
 * next redstats entry each iteration in the missing lines; verify
 * against the original source.
 */
rio_getstats(rio_t *rp, struct redstats *sp)
	for (i = 0; i < RIO_NDROPPREC; i++) {
		bcopy(&rp->q_stats[i], sp, sizeof(struct redstats));
		sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
#if (RIO_NDROPPREC == 3)
/*
 * internally, a drop precedence value is converted to an index
 * (dpindex holds the AF drop-precedence bits of the DSCP; the visible
 * return maps 010/100/110 -> 0/1/2).
 * NOTE(review): truncated extraction -- the missing lines presumably
 * handle dpindex == 0 (no drop-precedence bits set); verify against
 * the original source.
 */
dscp2index(u_int8_t dscp)
	int dpindex = dscp & AF_DROPPRECMASK;
	return ((dpindex >> 3) - 1);
/*
 * kludge: when a packet is dequeued, we need to know its drop precedence
 * in order to keep the queue length of each drop precedence.
 * use m_pkthdr.rcvif to pass this info.
 */
/* stash the precedence index in the mbuf by casting it to a pointer */
#define RIOM_SET_PRECINDEX(m, idx) \
	do { (m)->m_pkthdr.rcvif = (void *)((long)(idx)); } while (0)
/*
 * Retrieve the stashed index and clear rcvif.  Uses a GCC statement
 * expression so the macro yields the index as a value.
 */
#define RIOM_GET_PRECINDEX(m) \
	({ long idx; idx = (long)((m)->m_pkthdr.rcvif); \
	(m)->m_pkthdr.rcvif = NULL; idx; })
/*
 * Enqueue decision for one packet: update the average queue length
 * estimators for the packet's drop precedence and all higher
 * precedences, then decide between no drop, unforced (early/random)
 * drop, and forced drop, RED-style.
 *
 * NOTE(review): truncated extraction -- missing lines include the
 * opening brace, the "now"/"avg"/"droptype" declarations and
 * microtime(&now), the idle-period decay branch bodies, several
 * "else" arms, the actual m_freem()/_addq() calls and the returns.
 * The visible code is kept verbatim; restore from the original
 * altq sources before compiling.
 */
rio_addq(rio_t *rp, class_queue_t *q, struct mbuf *m,
    struct altq_pktattr *pktattr)
	u_int8_t dsfield, odsfield;
	int dpindex, i, n, t;
	struct dropprec_state *prec;

	/* classify by the DS field of the packet */
	dsfield = odsfield = read_dsfield(m, pktattr);
	dpindex = dscp2index(dsfield);

	/*
	 * update avg of the precedence states whose drop precedence
	 * is larger than or equal to the drop precedence of the packet
	 */
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		prec = &rp->rio_precstate[i];
			t = (now.tv_sec - prec->last.tv_sec);
				(now.tv_usec - prec->last.tv_usec);
			n = t / rp->rio_pkttime;
			/* calculate (avg = (1 - Wq)^n * avg) */
				avg = (avg >> FP_SHIFT) *
				    pow_w(rp->rio_wtab, n);
		/* run estimator. (avg is scaled by WEIGHT in fixed-point) */
		avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
		prec->avg = avg;		/* save the new value */
		/*
		 * count keeps a tally of arriving traffic that has not
		 */
	/* decide the fate of the packet using its own precedence state */
	prec = &rp->rio_precstate[dpindex];
	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (avg >= prec->th_min_s && prec->qlen > 1) {
		if (avg >= prec->th_max_s) {
			/* avg >= th_max: forced drop */
			droptype = DTYPE_FORCED;
		} else if (prec->old == 0) {
			/* first exceeds th_min */
		} else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
		    prec->probd, prec->count)) {
			/* unforced drop by red */
			droptype = DTYPE_EARLY;
	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	if (droptype != DTYPE_NODROP) {
		/* always drop incoming packet (as opposed to randomdrop) */
		for (i = dpindex; i < RIO_NDROPPREC; i++)
			rp->rio_precstate[i].count = 0;
		if (droptype == DTYPE_EARLY)
			rp->q_stats[dpindex].drop_unforced++;
			rp->q_stats[dpindex].drop_forced++;
		PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
	/* packet accepted: this and all higher precedences grow by one */
	for (i = dpindex; i < RIO_NDROPPREC; i++)
		rp->rio_precstate[i].qlen++;

	/* save drop precedence index in mbuf hdr */
	RIOM_SET_PRECINDEX(m, dpindex);

	if (rp->rio_flags & RIOF_CLEARDSCP)
		dsfield &= ~DSCP_MASK;

	/* write the DS field back only if it actually changed */
	if (dsfield != odsfield)
		write_dsfield(m, pktattr, dsfield);

	PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
/*
 * Dequeue one packet and bring the per-precedence queue lengths back
 * in sync, marking a precedence idle (and timestamping it) when its
 * queue drains to zero.
 * NOTE(review): truncated extraction -- the return-type line, braces,
 * the "return (NULL);" after the empty-queue check, and the final
 * "return (m);" are missing; restore before compiling.
 */
rio_getq(rio_t *rp, class_queue_t *q)
	if ((m = _getq(q)) == NULL)

	/* recover the precedence index stashed at enqueue time */
	dpindex = RIOM_GET_PRECINDEX(m);
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		if (--rp->rio_precstate[i].qlen == 0) {
			if (rp->rio_precstate[i].idle == 0) {
				rp->rio_precstate[i].idle = 1;
				microtime(&rp->rio_precstate[i].last);
/*
 * ALTQ3-compat /dev/rio open handler (K&R-style definition; the
 * parameter declarations and body are largely missing from this
 * extraction).  Opening the device does no work by design.
 */
rioopen(dev, flag, fmt, p)
#if (__FreeBSD_version > 500000)
	/* everything will be done when the queueing scheme is attached. */
/*
 * ALTQ3-compat /dev/rio close handler: detach every rio_queue_t still
 * on rio_list, remembering the first error but continuing the sweep.
 * NOTE(review): truncated extraction -- declarations, braces and the
 * return are missing; restore before compiling.
 */
rioclose(dev, flag, fmt, p)
#if (__FreeBSD_version > 500000)
	while ((rqp = rio_list) != NULL) {
		err = rio_detach(rqp);
		if (err != 0 && error == 0)
508 rioioctl(dev, cmd, addr, flag, p)
513 #if (__FreeBSD_version > 500000)
520 struct rio_interface *ifacep;
524 /* check super-user privilege */
529 #if (__FreeBSD_version > 700000)
530 if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
532 #elsif (__FreeBSD_version > 400000)
533 if ((error = suser(p)) != 0)
536 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
545 ifacep = (struct rio_interface *)addr;
546 if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
550 error = altq_enable(rqp->rq_ifq);
554 ifacep = (struct rio_interface *)addr;
555 if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
559 error = altq_disable(rqp->rq_ifq);
563 ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
569 /* allocate and initialize rio_queue_t */
570 rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK);
575 bzero(rqp, sizeof(rio_queue_t));
577 rqp->rq_q = malloc(sizeof(class_queue_t),
579 if (rqp->rq_q == NULL) {
584 bzero(rqp->rq_q, sizeof(class_queue_t));
586 rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
587 if (rqp->rq_rio == NULL) {
588 free(rqp->rq_q, M_DEVBUF);
594 rqp->rq_ifq = &ifp->if_snd;
595 qtail(rqp->rq_q) = NULL;
597 qlimit(rqp->rq_q) = RIO_LIMIT;
598 qtype(rqp->rq_q) = Q_RIO;
601 * set RIO to this ifnet structure.
603 error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
604 rio_enqueue, rio_dequeue, rio_request,
607 rio_destroy(rqp->rq_rio);
608 free(rqp->rq_q, M_DEVBUF);
613 /* add this state to the rio list */
614 rqp->rq_next = rio_list;
619 ifacep = (struct rio_interface *)addr;
620 if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
624 error = rio_detach(rqp);
629 struct rio_stats *q_stats;
633 q_stats = (struct rio_stats *)addr;
634 if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
635 ALTQT_RIO)) == NULL) {
642 q_stats->q_limit = qlimit(rqp->rq_q);
643 q_stats->weight = rp->rio_weight;
644 q_stats->flags = rp->rio_flags;
646 for (i = 0; i < RIO_NDROPPREC; i++) {
647 q_stats->q_len[i] = rp->rio_precstate[i].qlen;
648 bcopy(&rp->q_stats[i], &q_stats->q_stats[i],
649 sizeof(struct redstats));
650 q_stats->q_stats[i].q_avg =
651 rp->rio_precstate[i].avg >> rp->rio_wshift;
653 q_stats->q_params[i].inv_pmax
654 = rp->rio_precstate[i].inv_pmax;
655 q_stats->q_params[i].th_min
656 = rp->rio_precstate[i].th_min;
657 q_stats->q_params[i].th_max
658 = rp->rio_precstate[i].th_max;
660 } while (/*CONSTCOND*/ 0);
669 fc = (struct rio_conf *)addr;
670 if ((rqp = altq_lookup(fc->iface.rio_ifname,
671 ALTQT_RIO)) == NULL) {
676 new = rio_alloc(fc->rio_weight, &fc->q_params[0],
677 fc->rio_flags, fc->rio_pkttime);
685 limit = fc->rio_limit;
686 if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
687 limit = fc->q_params[RIO_NDROPPREC-1].th_max;
688 qlimit(rqp->rq_q) = limit;
690 rio_destroy(rqp->rq_rio);
695 /* write back new values */
696 fc->rio_limit = limit;
697 for (i = 0; i < RIO_NDROPPREC; i++) {
698 fc->q_params[i].inv_pmax =
699 rqp->rq_rio->rio_precstate[i].inv_pmax;
700 fc->q_params[i].th_min =
701 rqp->rq_rio->rio_precstate[i].th_min;
702 fc->q_params[i].th_max =
703 rqp->rq_rio->rio_precstate[i].th_max;
705 } while (/*CONSTCOND*/ 0);
708 case RIO_SETDEFAULTS:
710 struct redparams *rp;
713 rp = (struct redparams *)addr;
714 for (i = 0; i < RIO_NDROPPREC; i++)
715 default_rio_params[i] = rp[i];
716 } while (/*CONSTCOND*/ 0);
	/*
	 * NOTE(review): this is the body of rio_detach(rqp); its function
	 * header and several interior lines (the head-of-list comparison,
	 * loop break, error paths, final free of rqp and return) are
	 * missing from this extraction -- restore before compiling.
	 */
	/* quiesce the interface queue before tearing the state down */
	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
		altq_disable(rqp->rq_ifq);

	if ((error = altq_detach(rqp->rq_ifq)))

		/* unlink rqp from the global rio_list */
		rio_list = rqp->rq_next;
		for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
			if (tmp->rq_next == rqp) {
				tmp->rq_next = rqp->rq_next;
		/* rqp was on neither the head nor the chain: corruption */
		printf("rio_detach: no state found in rio_list!\n");
	rio_destroy(rqp->rq_rio);
	free(rqp->rq_q, M_DEVBUF);
759 * rio support routines
/*
 * ALTQ request hook (e.g. ALTRQ_PURGE).  The discipline state hangs
 * off ifq->altq_disc.  Caller holds the ifq lock.
 * NOTE(review): truncated extraction -- the K&R parameter
 * declarations, switch on req, purge body and return are missing.
 */
rio_request(ifq, req, arg)
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);
		if (ALTQ_IS_ENABLED(ifq))
/*
 * ALTQ enqueue hook: delegate the drop/accept decision to rio_addq().
 * returns: 0 when successfully queued.
 *	    ENOBUFS when drop occurs.
 * NOTE(review): truncated extraction -- parameter declarations,
 * braces and the ENOBUFS return path are missing.
 */
rio_enqueue(ifq, m, pktattr)
	struct altq_pktattr *pktattr;
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) == 0)
/*
 * ALTQ dequeue hook (this is the body of rio_dequeue(ifq, op); the
 * function header lines are missing from this extraction).
 * must be called in splimp.
 * returns: mbuf dequeued.
 *	    NULL when no packet is available in the queue.
 * ALTDQ_POLL peeks at the head without removing it.
 */
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	struct mbuf *m = NULL;

	IFQ_LOCK_ASSERT(ifq);

	if (op == ALTDQ_POLL)
		return qhead(rqp->rq_q);

	m = rio_getq(rqp->rq_rio, rqp->rq_q);
/* ALTQ3-compat character-device switch for /dev/rio */
static struct altqsw rio_sw =
	{"rio", rioopen, rioclose, rioioctl};

/* KLD module glue; altq_red provides wtab_alloc/pow_w/drop_early */
ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);
MODULE_VERSION(altq_rio, 1);
MODULE_DEPEND(altq_rio, altq_red, 1, 1, 1);
841 #endif /* KLD_MODULE */
842 #endif /* ALTQ3_COMPAT */
844 #endif /* ALTQ_RIO */