1 /* $OpenBSD: pfctl_altq.c,v 1.93 2007/10/15 02:16:35 deraadt Exp $ */
5 * Sony Computer Science Laboratories Inc.
6 * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 #include <sys/cdefs.h>
22 __FBSDID("$FreeBSD$");
24 #define PFIOC_USE_LATEST
25 #define _WANT_FREEBSD_BITSET
27 #include <sys/types.h>
28 #include <sys/bitset.h>
29 #include <sys/ioctl.h>
30 #include <sys/socket.h>
33 #include <netinet/in.h>
34 #include <net/pfvar.h>
47 #include <net/altq/altq.h>
48 #include <net/altq/altq_cbq.h>
49 #include <net/altq/altq_codel.h>
50 #include <net/altq/altq_priq.h>
51 #include <net/altq/altq_hfsc.h>
52 #include <net/altq/altq_fairq.h>
54 #include "pfctl_parser.h"
/* A service curve is "null" when absent or when both of its slopes are 0. */
57 #define is_sc_null(sc) (((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))
/* All interfaces that have a root altq discipline attached. */
59 static STAILQ_HEAD(interfaces, pfctl_altq) interfaces = STAILQ_HEAD_INITIALIZER(interfaces);
/* Lookup tables: "ifname:qname" -> queue, ifname -> discipline, qname -> qid. */
60 static struct hsearch_data queue_map;
61 static struct hsearch_data if_map;
62 static struct hsearch_data qid_map;
64 static struct pfctl_altq *pfaltq_lookup(char *ifname);
65 static struct pfctl_altq *qname_to_pfaltq(const char *, const char *);
66 static u_int32_t qname_to_qid(char *);
68 static int eval_pfqueue_cbq(struct pfctl *, struct pf_altq *,
70 static int cbq_compute_idletime(struct pfctl *, struct pf_altq *);
71 static int check_commit_cbq(int, int, struct pfctl_altq *);
72 static int print_cbq_opts(const struct pf_altq *);
74 static int print_codel_opts(const struct pf_altq *,
75 const struct node_queue_opt *);
77 static int eval_pfqueue_priq(struct pfctl *, struct pf_altq *,
79 static int check_commit_priq(int, int, struct pfctl_altq *);
80 static int print_priq_opts(const struct pf_altq *);
82 static int eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *,
83 struct pfctl_altq *, struct pfctl_altq *);
84 static int check_commit_hfsc(int, int, struct pfctl_altq *);
85 static int print_hfsc_opts(const struct pf_altq *,
86 const struct node_queue_opt *);
88 static int eval_pfqueue_fairq(struct pfctl *, struct pf_altq *,
89 struct pfctl_altq *, struct pfctl_altq *);
90 static int print_fairq_opts(const struct pf_altq *,
91 const struct node_queue_opt *);
92 static int check_commit_fairq(int, int, struct pfctl_altq *);
94 static void gsc_add_sc(struct gen_sc *, struct service_curve *);
95 static int is_gsc_under_sc(struct gen_sc *,
96 struct service_curve *);
97 static struct segment *gsc_getentry(struct gen_sc *, double);
98 static int gsc_add_seg(struct gen_sc *, double, double, double,
100 static double sc_x2y(struct service_curve *, double);
102 u_int32_t getifspeed(char *);
103 u_long getifmtu(char *);
104 int eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
106 u_int64_t eval_bwspec(struct node_queue_bw *, u_int64_t);
107 void print_hfsc_sc(const char *, u_int, u_int, u_int,
108 const struct node_hfsc_sc *);
109 void print_fairq_sc(const char *, u_int, u_int, u_int,
110 const struct node_fairq_sc *);
112 static __attribute__((constructor)) void
113 pfctl_altq_init(void)
116 * As hdestroy() will never be called on these tables, it will be
117 * safe to use references into the stored data as keys.
119 if (hcreate_r(0, &queue_map) == 0)
120 err(1, "Failed to create altq queue map");
121 if (hcreate_r(0, &if_map) == 0)
122 err(1, "Failed to create altq interface map");
123 if (hcreate_r(0, &qid_map) == 0)
124 err(1, "Failed to create altq queue id map");
/*
 * pfaltq_store: record a pf_altq in the file-scope lookup tables.
 * An entry with an empty qname describes the interface discipline
 * itself (indexed by ifname); a queue entry is indexed by the
 * composite key "ifname:qname" and its qid by qname alone.
 * Exits via err() on allocation or insertion failure.
 */
128 pfaltq_store(struct pf_altq *a)
130 struct pfctl_altq *altq;
/* take a private copy; the caller's pf_altq may be reused */
135 if ((altq = malloc(sizeof(*altq))) == NULL)
136 err(1, "queue malloc");
137 memcpy(&altq->pa, a, sizeof(struct pf_altq));
138 memset(&altq->meta, 0, sizeof(altq->meta));
/* empty qname => this is the per-interface discipline entry */
140 if (a->qname[0] == 0) {
141 item.key = altq->pa.ifname;
143 if (hsearch_r(item, ENTER, &ret_item, &if_map) == 0)
144 err(1, "interface map insert");
145 STAILQ_INSERT_TAIL(&interfaces, altq, meta.link);
/* queue entry: heap-allocated "ifname:qname" key, never freed (tables live for the process lifetime) */
147 key_size = sizeof(a->ifname) + sizeof(a->qname);
148 if ((item.key = malloc(key_size)) == NULL)
149 err(1, "queue map key malloc");
150 snprintf(item.key, key_size, "%s:%s", a->ifname, a->qname);
152 if (hsearch_r(item, ENTER, &ret_item, &queue_map) == 0)
153 err(1, "queue map insert");
/* qid is keyed by qname alone; same-named queues share one entry */
155 item.key = altq->pa.qname;
156 item.data = &altq->pa.qid;
157 if (hsearch_r(item, ENTER, &ret_item, &qid_map) == 0)
158 err(1, "qid map insert");
/*
 * pfaltq_lookup: find the discipline entry for ifname in if_map;
 * returns NULL when the interface has no altq attached.
 */
162 static struct pfctl_altq *
163 pfaltq_lookup(char *ifname)
169 if (hsearch_r(item, FIND, &ret_item, &if_map) == 0)
172 return (ret_item->data);
/*
 * qname_to_pfaltq: look up a queue by its composite "ifname:qname"
 * key in queue_map; returns NULL when the queue does not exist.
 */
175 static struct pfctl_altq *
176 qname_to_pfaltq(const char *qname, const char *ifname)
180 char key[IFNAMSIZ + PF_QNAME_SIZE];
/* key format must match the one built in pfaltq_store() */
183 snprintf(item.key, sizeof(key), "%s:%s", ifname, qname);
184 if (hsearch_r(item, FIND, &ret_item, &queue_map) == 0)
187 return (ret_item->data);
/*
 * qname_to_qid: map a queue name to its numeric queue id via qid_map;
 * same-named queues on different interfaces share one id.
 */
191 qname_to_qid(char *qname)
198 * We guarantee that same named queues on different interfaces
202 if (hsearch_r(item, FIND, &ret_item, &qid_map) == 0)
205 qid = *(uint32_t *)ret_item->data;
/*
 * print_altq: print the "altq on <ifname>" rule for an interface
 * discipline; queue entries are delegated to print_queue().
 */
210 print_altq(const struct pf_altq *a, unsigned int level,
211 struct node_queue_bw *bw, struct node_queue_opt *qopts)
/* non-empty qname means this is a queue, not a discipline */
213 if (a->qname[0] != 0) {
214 print_queue(a, level, bw, 1, qopts)
219 if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
223 printf("altq on %s ", a->ifname);
/* dispatch to the per-scheduler option printer */
225 switch (a->scheduler) {
227 if (!print_cbq_opts(a))
231 if (!print_priq_opts(a))
235 if (!print_hfsc_opts(a, qopts))
239 if (!print_fairq_opts(a, qopts))
243 if (!print_codel_opts(a, qopts))
/* prefer the user's percentage form when one was given */
248 if (bw != NULL && bw->bw_percent > 0) {
249 if (bw->bw_percent < 100)
250 printf("bandwidth %u%% ", bw->bw_percent);
252 printf("bandwidth %s ", rate2str((double)a->ifbandwidth));
254 if (a->qlimit != DEFAULT_QLIMIT)
255 printf("qlimit %u ", a->qlimit);
256 printf("tbrsize %u ", a->tbrsize);
/*
 * print_queue: print one "queue" rule, indented by nesting level;
 * bandwidth/priority/qlimit are only printed for schedulers that
 * support per-queue bandwidth (CBQ, HFSC, FAIRQ) or when non-default.
 */
260 print_queue(const struct pf_altq *a, unsigned int level,
261 struct node_queue_bw *bw, int print_interface,
262 struct node_queue_opt *qopts)
267 if (a->local_flags & PFALTQ_FLAG_IF_REMOVED)
/* indent one step per nesting level */
271 for (i = 0; i < level; ++i)
273 printf("%s ", a->qname);
275 printf("on %s ", a->ifname);
276 if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
277 a->scheduler == ALTQT_FAIRQ) {
278 if (bw != NULL && bw->bw_percent > 0) {
279 if (bw->bw_percent < 100)
280 printf("bandwidth %u%% ", bw->bw_percent);
282 printf("bandwidth %s ", rate2str((double)a->bandwidth));
284 if (a->priority != DEFAULT_PRIORITY)
285 printf("priority %u ", a->priority);
286 if (a->qlimit != DEFAULT_QLIMIT)
287 printf("qlimit %u ", a->qlimit);
288 switch (a->scheduler) {
296 print_hfsc_opts(a, qopts);
299 print_fairq_opts(a, qopts);
305 * eval_pfaltq computes the discipline parameters.
308 eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
309 struct node_queue_opt *opts)
312 u_int size, errors = 0;
/* absolute bandwidth wins; otherwise derive it from the link speed */
314 if (bw->bw_absolute > 0)
315 pa->ifbandwidth = bw->bw_absolute;
317 if ((rate = getifspeed(pa->ifname)) == 0) {
318 fprintf(stderr, "interface %s does not know its bandwidth, "
319 "please specify an absolute bandwidth\n",
322 } else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
323 pa->ifbandwidth = rate;
326 * Limit bandwidth to UINT_MAX for schedulers that aren't 64-bit ready.
328 if ((pa->scheduler != ALTQT_HFSC) && (pa->ifbandwidth > UINT_MAX)) {
329 pa->ifbandwidth = UINT_MAX;
330 warnx("interface %s bandwidth limited to %" PRIu64 " bps "
331 "because selected scheduler is 32-bit limited\n", pa->ifname,
334 errors += eval_queue_opts(pa, opts, pa->ifbandwidth);
336 /* if tbrsize is not specified, use heuristics */
/* token-bucket regulator size scales with link speed, in MTU units */
337 if (pa->tbrsize == 0) {
338 rate = pa->ifbandwidth;
339 if (rate <= 1 * 1000 * 1000)
341 else if (rate <= 10 * 1000 * 1000)
343 else if (rate <= 200 * 1000 * 1000)
345 else if (rate <= 2500 * 1000 * 1000ULL)
349 size = size * getifmtu(pa->ifname);
356 * check_commit_altq does consistency check for each interface
359 check_commit_altq(int dev, int opts)
361 struct pfctl_altq *if_ppa;
364 /* call the discipline check for each interface. */
365 STAILQ_FOREACH(if_ppa, &interfaces, meta.link) {
366 switch (if_ppa->pa.scheduler) {
368 error = check_commit_cbq(dev, opts, if_ppa);
371 error = check_commit_priq(dev, opts, if_ppa);
374 error = check_commit_hfsc(dev, opts, if_ppa);
377 error = check_commit_fairq(dev, opts, if_ppa);
387 * eval_pfqueue computes the queue parameters.
390 eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
391 struct node_queue_opt *opts)
393 /* should be merged with expand_queue */
394 struct pfctl_altq *if_ppa, *parent;
397 /* find the corresponding interface and copy fields used by queues */
398 if ((if_ppa = pfaltq_lookup(pa->ifname)) == NULL) {
399 fprintf(stderr, "altq not defined on %s\n", pa->ifname);
402 pa->scheduler = if_ppa->pa.scheduler;
403 pa->ifbandwidth = if_ppa->pa.ifbandwidth;
/* duplicate queue names on one interface are a hard error */
405 if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
406 fprintf(stderr, "queue %s already exists on interface %s\n",
407 pa->qname, pa->ifname);
410 pa->qid = qname_to_qid(pa->qname);
/* resolve the parent queue (if any) to obtain its qid */
413 if (pa->parent[0] != 0) {
414 parent = qname_to_pfaltq(pa->parent, pa->ifname);
415 if (parent == NULL) {
416 fprintf(stderr, "parent %s not found for %s\n",
417 pa->parent, pa->qname);
420 pa->parent_qid = parent->pa.qid;
423 pa->qlimit = DEFAULT_QLIMIT;
/* only these schedulers carry per-queue bandwidth */
425 if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
426 pa->scheduler == ALTQT_FAIRQ) {
427 pa->bandwidth = eval_bwspec(bw,
428 parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth);
430 if (pa->bandwidth > pa->ifbandwidth) {
431 fprintf(stderr, "bandwidth for %s higher than "
432 "interface\n", pa->qname);
436 * If not HFSC, then check that the sum of the child
437 * bandwidths is less than the parent's bandwidth. For
438 * HFSC, the equivalent concept is to check that the sum of
439 * the child linkshare service curves are under the parent's
440 * linkshare service curve, and that check is performed by
441 * eval_pfqueue_hfsc().
443 if ((parent != NULL) && (pa->scheduler != ALTQT_HFSC)) {
444 if (pa->bandwidth > parent->pa.bandwidth) {
445 warnx("bandwidth for %s higher than parent",
449 parent->meta.bwsum += pa->bandwidth;
450 if (parent->meta.bwsum > parent->pa.bandwidth) {
451 warnx("the sum of the child bandwidth (%" PRIu64
452 ") higher than parent \"%s\" (%" PRIu64 ")",
453 parent->meta.bwsum, parent->pa.qname,
454 parent->pa.bandwidth);
459 if (eval_queue_opts(pa, opts,
460 parent == NULL ? pa->ifbandwidth : parent->pa.bandwidth))
/* count children so first-child initialization can trigger below */
464 parent->meta.children++;
466 switch (pa->scheduler) {
468 error = eval_pfqueue_cbq(pf, pa, if_ppa);
471 error = eval_pfqueue_priq(pf, pa, if_ppa);
474 error = eval_pfqueue_hfsc(pf, pa, if_ppa, parent);
477 error = eval_pfqueue_fairq(pf, pa, if_ppa, parent);
486 * CBQ support functions
488 #define RM_FILTER_GAIN 5 /* log2 of gain, e.g., 5 => 31/32 */
489 #define RM_NS_PER_SEC (1000000000)
/*
 * eval_pfqueue_cbq: fill in CBQ class parameters for one queue.
 * Clamps packet sizes to the interface MTU, marks root/default
 * classes on the interface, then computes the idle-time parameters.
 */
492 eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
494 struct cbq_opts *opts;
497 if (pa->priority >= CBQ_MAXPRI) {
498 warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
502 ifmtu = getifmtu(pa->ifname);
503 opts = &pa->pq_u.cbq_opts;
505 if (opts->pktsize == 0) { /* use default */
506 opts->pktsize = ifmtu;
507 if (opts->pktsize > MCLBYTES) /* do what TCP does */
508 opts->pktsize &= ~MCLBYTES;
509 } else if (opts->pktsize > ifmtu)
510 opts->pktsize = ifmtu;
511 if (opts->maxpktsize == 0) /* use default */
512 opts->maxpktsize = ifmtu;
513 else if (opts->maxpktsize > ifmtu)
/* NOTE(review): clamps pktsize, not maxpktsize, leaving maxpktsize > ifmtu — looks like a long-standing bug; confirm against upstream before changing */
514 opts->pktsize = ifmtu;
516 if (opts->pktsize > opts->maxpktsize)
517 opts->pktsize = opts->maxpktsize;
/* a queue with no parent is the root class */
519 if (pa->parent[0] == 0)
520 opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);
/* tally root/default classes for the commit-time sanity check */
522 if (pa->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
523 if_ppa->meta.root_classes++;
524 if (pa->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
525 if_ppa->meta.default_classes++;
527 cbq_compute_idletime(pf, pa);
532 * compute ns_per_byte, maxidle, minidle, and offtime
535 cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
537 struct cbq_opts *opts;
538 double maxidle_s, maxidle, minidle;
539 double offtime, nsPerByte, ifnsPerByte, ptime, cptime;
540 double z, g, f, gton, gtom;
541 u_int minburst, maxburst;
543 opts = &pa->pq_u.cbq_opts;
/* ifnsPerByte: transmission time of one byte on the raw link */
544 ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
545 minburst = opts->minburst;
546 maxburst = opts->maxburst;
/* f: this queue's fraction of the interface bandwidth */
548 if (pa->bandwidth == 0)
549 f = 0.0001; /* small enough? */
551 f = ((double) pa->bandwidth / (double) pa->ifbandwidth);
553 nsPerByte = ifnsPerByte / f;
554 ptime = (double)opts->pktsize * ifnsPerByte;
555 cptime = ptime * (1.0 - f) / f;
/* guard against kernel integer overflow for very slow queues */
557 if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
559 * this causes integer overflow in kernel!
560 * (bandwidth < 6Kbps when max_pkt_size=1500)
562 if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
563 warnx("queue bandwidth must be larger than %s",
564 rate2str(ifnsPerByte * (double)opts->maxpktsize /
565 (double)INT_MAX * (double)pa->ifbandwidth));
566 fprintf(stderr, "cbq: queue %s is too slow!\n",
569 nsPerByte = (double)(INT_MAX / opts->maxpktsize);
/* default bursts depend on how costly a single packet is */
572 if (maxburst == 0) { /* use default */
573 if (cptime > 10.0 * 1000000)
578 if (minburst == 0) /* use default */
580 if (minburst > maxburst)
/* EWMA filter constants; see the RM_FILTER_GAIN definition above */
583 z = (double)(1 << RM_FILTER_GAIN);
585 gton = pow(g, (double)maxburst);
586 gtom = pow(g, (double)(minburst-1));
587 maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
588 maxidle_s = (1.0 - g);
589 if (maxidle > maxidle_s)
590 maxidle = ptime * maxidle;
592 maxidle = ptime * maxidle_s;
593 offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
594 minidle = -((double)opts->maxpktsize * (double)nsPerByte);
596 /* scale parameters */
597 maxidle = ((maxidle * 8.0) / nsPerByte) *
598 pow(2.0, (double)RM_FILTER_GAIN);
599 offtime = (offtime * 8.0) / nsPerByte *
600 pow(2.0, (double)RM_FILTER_GAIN);
601 minidle = ((minidle * 8.0) / nsPerByte) *
602 pow(2.0, (double)RM_FILTER_GAIN);
/* convert from nanoseconds to microseconds */
604 maxidle = maxidle / 1000.0;
605 offtime = offtime / 1000.0;
606 minidle = minidle / 1000.0;
608 opts->minburst = minburst;
609 opts->maxburst = maxburst;
610 opts->ns_per_byte = (u_int)nsPerByte;
611 opts->maxidle = (u_int)fabs(maxidle);
612 opts->minidle = (int)minidle;
613 opts->offtime = (u_int)fabs(offtime);
/*
 * check_commit_cbq: commit-time sanity check — a CBQ interface must
 * have exactly one root class and exactly one default class.
 */
619 check_commit_cbq(int dev, int opts, struct pfctl_altq *if_ppa)
624 * check if cbq has one root queue and one default queue
627 if (if_ppa->meta.root_classes != 1) {
628 warnx("should have one root queue on %s", if_ppa->pa.ifname);
631 if (if_ppa->meta.default_classes != 1) {
632 warnx("should have one default queue on %s", if_ppa->pa.ifname);
/*
 * print_cbq_opts: print the CBQ option keywords for one queue;
 * each set flag maps to one keyword inside "cbq( ... )".
 */
639 print_cbq_opts(const struct pf_altq *a)
641 const struct cbq_opts *opts;
643 opts = &a->pq_u.cbq_opts;
646 if (opts->flags & CBQCLF_RED)
648 if (opts->flags & CBQCLF_ECN)
650 if (opts->flags & CBQCLF_RIO)
652 if (opts->flags & CBQCLF_CODEL)
654 if (opts->flags & CBQCLF_CLEARDSCP)
655 printf(" cleardscp");
656 if (opts->flags & CBQCLF_FLOWVALVE)
657 printf(" flowvalve");
658 if (opts->flags & CBQCLF_BORROW)
660 if (opts->flags & CBQCLF_WRR)
662 if (opts->flags & CBQCLF_EFFICIENT)
663 printf(" efficient");
664 if (opts->flags & CBQCLF_ROOTCLASS)
666 if (opts->flags & CBQCLF_DEFCLASS)
676 * PRIQ support functions
/*
 * eval_pfqueue_priq: validate a PRIQ queue — priority must be in
 * range and unique on the interface; tracks default classes.
 */
679 eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa)
682 if (pa->priority >= PRIQ_MAXPRI) {
683 warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
/* each priority level may be used by at most one queue */
686 if (BIT_ISSET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris)) {
687 warnx("%s does not have a unique priority on interface %s",
688 pa->qname, pa->ifname);
691 BIT_SET(QPRI_BITSET_SIZE, pa->priority, &if_ppa->meta.qpris);
693 if (pa->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
694 if_ppa->meta.default_classes++;
/*
 * check_commit_priq: a PRIQ interface must have exactly one
 * default class.
 */
699 check_commit_priq(int dev, int opts, struct pfctl_altq *if_ppa)
703 * check if priq has one default class for this interface
705 if (if_ppa->meta.default_classes != 1) {
706 warnx("should have one default queue on %s", if_ppa->pa.ifname);
/*
 * print_priq_opts: print the PRIQ option keywords for one queue.
 */
713 print_priq_opts(const struct pf_altq *a)
715 const struct priq_opts *opts;
717 opts = &a->pq_u.priq_opts;
721 if (opts->flags & PRCF_RED)
723 if (opts->flags & PRCF_ECN)
725 if (opts->flags & PRCF_RIO)
727 if (opts->flags & PRCF_CODEL)
729 if (opts->flags & PRCF_CLEARDSCP)
730 printf(" cleardscp");
731 if (opts->flags & PRCF_DEFAULTCLASS)
741 * HFSC support functions
/*
 * eval_pfqueue_hfsc: validate one HFSC queue against its parent and
 * the interface.  Checks service-curve well-formedness (m1/m2/d
 * relationships), admission for the real-time and linkshare curves,
 * and the upper-limit curve's consistency.
 */
744 eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa, struct pfctl_altq *if_ppa,
745 struct pfctl_altq *parent)
747 struct hfsc_opts_v1 *opts;
748 struct service_curve sc;
750 opts = &pa->pq_u.hfsc_opts;
/* root queue: linkshare defaults to the full interface bandwidth */
752 if (parent == NULL) {
754 opts->lssc_m1 = pa->ifbandwidth;
755 opts->lssc_m2 = pa->ifbandwidth;
760 /* First child initializes the parent's service curve accumulators. */
761 if (parent->meta.children == 1) {
762 LIST_INIT(&parent->meta.rtsc);
763 LIST_INIT(&parent->meta.lssc);
/* the default class must remain a leaf */
766 if (parent->pa.pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
767 warnx("adding %s would make default queue %s not a leaf",
768 pa->qname, pa->parent);
772 if (pa->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS)
773 if_ppa->meta.default_classes++;
775 /* if link_share is not specified, use bandwidth */
776 if (opts->lssc_m2 == 0)
777 opts->lssc_m2 = pa->bandwidth;
/* a nonzero m1 with zero m2 would describe a curve that vanishes */
779 if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
780 (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
781 (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
782 warnx("m2 is zero for %s", pa->qname);
/* concave curves (m1 < m2) are not supported; m1 must be 0 then */
786 if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
787 (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
788 (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
789 warnx("m1 must be zero for convex curve: %s", pa->qname);
795 * for the real-time service curve, the sum of the service curves
796 * should not exceed 80% of the interface bandwidth. 20% is reserved
797 * not to over-commit the actual interface bandwidth.
798 * for the linkshare service curve, the sum of the child service
799 * curve should not exceed the parent service curve.
800 * for the upper-limit service curve, the assigned bandwidth should
801 * be smaller than the interface bandwidth, and the upper-limit should
802 * be larger than the real-time service curve when both are defined.
805 /* check the real-time service curve. reserve 20% of interface bw */
806 if (opts->rtsc_m2 != 0) {
807 /* add this queue to the sum */
808 sc.m1 = opts->rtsc_m1;
810 sc.m2 = opts->rtsc_m2;
811 gsc_add_sc(&parent->meta.rtsc, &sc);
812 /* compare the sum with 80% of the interface */
815 sc.m2 = pa->ifbandwidth / 100 * 80;
816 if (!is_gsc_under_sc(&parent->meta.rtsc, &sc)) {
817 warnx("real-time sc exceeds 80%% of the interface "
818 "bandwidth (%s)", rate2str((double)sc.m2));
823 /* check the linkshare service curve. */
824 if (opts->lssc_m2 != 0) {
825 /* add this queue to the child sum */
826 sc.m1 = opts->lssc_m1;
828 sc.m2 = opts->lssc_m2;
829 gsc_add_sc(&parent->meta.lssc, &sc);
830 /* compare the sum of the children with parent's sc */
831 sc.m1 = parent->pa.pq_u.hfsc_opts.lssc_m1;
832 sc.d = parent->pa.pq_u.hfsc_opts.lssc_d;
833 sc.m2 = parent->pa.pq_u.hfsc_opts.lssc_m2;
834 if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
835 warnx("linkshare sc exceeds parent's sc");
840 /* check the upper-limit service curve. */
841 if (opts->ulsc_m2 != 0) {
842 if (opts->ulsc_m1 > pa->ifbandwidth ||
843 opts->ulsc_m2 > pa->ifbandwidth) {
844 warnx("upper-limit larger than interface bandwidth");
847 if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
848 warnx("upper-limit sc smaller than real-time sc");
857 * FAIRQ support functions
/*
 * eval_pfqueue_fairq: validate one FAIRQ queue; mirrors the HFSC
 * linkshare admission check (child curves must fit under parent's).
 */
860 eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa,
861 struct pfctl_altq *if_ppa, struct pfctl_altq *parent)
863 struct fairq_opts *opts;
864 struct service_curve sc;
866 opts = &pa->pq_u.fairq_opts;
/* root queue: linkshare defaults to the full interface bandwidth */
868 if (parent == NULL) {
870 opts->lssc_m1 = pa->ifbandwidth;
871 opts->lssc_m2 = pa->ifbandwidth;
876 /* First child initializes the parent's service curve accumulator. */
877 if (parent->meta.children == 1)
878 LIST_INIT(&parent->meta.lssc);
/* the default class must remain a leaf */
880 if (parent->pa.pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
881 warnx("adding %s would make default queue %s not a leaf",
882 pa->qname, pa->parent);
886 if (pa->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS)
887 if_ppa->meta.default_classes++;
889 /* if link_share is not specified, use bandwidth */
890 if (opts->lssc_m2 == 0)
891 opts->lssc_m2 = pa->bandwidth;
895 * for the real-time service curve, the sum of the service curves
896 * should not exceed 80% of the interface bandwidth. 20% is reserved
897 * not to over-commit the actual interface bandwidth.
898 * for the link-sharing service curve, the sum of the child service
899 * curve should not exceed the parent service curve.
900 * for the upper-limit service curve, the assigned bandwidth should
901 * be smaller than the interface bandwidth, and the upper-limit should
902 * be larger than the real-time service curve when both are defined.
905 /* check the linkshare service curve. */
906 if (opts->lssc_m2 != 0) {
907 /* add this queue to the child sum */
908 sc.m1 = opts->lssc_m1;
910 sc.m2 = opts->lssc_m2;
911 gsc_add_sc(&parent->meta.lssc, &sc);
912 /* compare the sum of the children with parent's sc */
913 sc.m1 = parent->pa.pq_u.fairq_opts.lssc_m1;
914 sc.d = parent->pa.pq_u.fairq_opts.lssc_d;
915 sc.m2 = parent->pa.pq_u.fairq_opts.lssc_m2;
916 if (!is_gsc_under_sc(&parent->meta.lssc, &sc)) {
917 warnx("link-sharing sc exceeds parent's sc");
/*
 * check_commit_hfsc: an HFSC interface must have exactly one
 * default queue.
 */
926 check_commit_hfsc(int dev, int opts, struct pfctl_altq *if_ppa)
929 /* check if hfsc has one default queue for this interface */
930 if (if_ppa->meta.default_classes != 1) {
931 warnx("should have one default queue on %s", if_ppa->pa.ifname);
/*
 * check_commit_fairq: a FAIRQ interface must have exactly one
 * default queue.
 */
938 check_commit_fairq(int dev __unused, int opts __unused, struct pfctl_altq *if_ppa)
941 /* check if fairq has one default queue for this interface */
942 if (if_ppa->meta.default_classes != 1) {
943 warnx("should have one default queue on %s", if_ppa->pa.ifname);
/*
 * print_hfsc_opts: print the HFSC option keywords and the three
 * service curves (realtime/linkshare/upperlimit) for one queue;
 * only non-default values are printed.
 */
950 print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
952 const struct hfsc_opts_v1 *opts;
953 const struct node_hfsc_sc *rtsc, *lssc, *ulsc;
955 opts = &a->pq_u.hfsc_opts;
957 rtsc = lssc = ulsc = NULL;
959 rtsc = &qopts->data.hfsc_opts.realtime;
960 lssc = &qopts->data.hfsc_opts.linkshare;
961 ulsc = &qopts->data.hfsc_opts.upperlimit;
/* print the "hfsc( ... )" group only when something is non-default */
964 if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
965 (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
966 opts->lssc_d != 0))) {
968 if (opts->flags & HFCF_RED)
970 if (opts->flags & HFCF_ECN)
972 if (opts->flags & HFCF_RIO)
974 if (opts->flags & HFCF_CODEL)
976 if (opts->flags & HFCF_CLEARDSCP)
977 printf(" cleardscp");
978 if (opts->flags & HFCF_DEFAULTCLASS)
980 if (opts->rtsc_m2 != 0)
981 print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
982 opts->rtsc_m2, rtsc);
983 if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
985 print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
986 opts->lssc_m2, lssc);
987 if (opts->ulsc_m2 != 0)
988 print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
989 opts->ulsc_m2, ulsc);
/*
 * print_codel_opts: print the CoDel target/interval/ecn options for
 * one queue when any of them is non-default.
 */
998 print_codel_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
1000 const struct codel_opts *opts;
1002 opts = &a->pq_u.codel_opts;
1003 if (opts->target || opts->interval || opts->ecn) {
1006 printf(" target %d", opts->target);
1008 printf(" interval %d", opts->interval);
/*
 * print_fairq_opts: print the FAIRQ option keywords and (non-default)
 * linkshare service curve for one queue.
 */
1020 print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
1022 const struct fairq_opts *opts;
1023 const struct node_fairq_sc *loc_lssc;
1025 opts = &a->pq_u.fairq_opts;
1029 loc_lssc = &qopts->data.fairq_opts.linkshare;
/* print the group only when something is non-default */
1032 (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1033 opts->lssc_d != 0))) {
1035 if (opts->flags & FARF_RED)
1037 if (opts->flags & FARF_ECN)
1039 if (opts->flags & FARF_RIO)
1041 if (opts->flags & FARF_CODEL)
1043 if (opts->flags & FARF_CLEARDSCP)
1044 printf(" cleardscp");
1045 if (opts->flags & FARF_DEFAULTCLASS)
1047 if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
1049 print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
1050 opts->lssc_m2, loc_lssc);
1059 * admission control using generalized service curve
1062 /* add a new service curve to a generalized service curve */
/* two segments: slope m1 over [0, d), then slope m2 from d to infinity */
1064 gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
1069 gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
1070 gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
1074 * check whether all points of a generalized service curve have
1075 * their y-coordinates no larger than a given two-piece linear
/* returns nonzero (true) when gsc fits entirely under sc */
1079 is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
1081 struct segment *s, *last, *end;
/* a null sc can only dominate an (effectively) empty gsc */
1084 if (is_sc_null(sc)) {
1085 if (LIST_EMPTY(gsc))
1087 LIST_FOREACH(s, gsc, _next) {
1094 * gsc has a dummy entry at the end with x = INFINITY.
1095 * loop through up to this dummy entry.
1097 end = gsc_getentry(gsc, INFINITY);
/* every segment start point must lie under sc */
1101 for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
1102 if (s->y > sc_x2y(sc, s->x))
1106 /* last now holds the real last segment */
/* beyond the last point, compare slopes and the value at sc->d */
1109 if (last->m > sc->m2)
1111 if (last->x < sc->d && last->m > sc->m1) {
1112 y = last->y + (sc->d - last->x) * last->m;
1113 if (y > sc_x2y(sc, sc->d))
1120 * return a segment entry starting at x.
1121 * if gsc has no entry starting at x, a new entry is created at x.
1123 static struct segment *
1124 gsc_getentry(struct gen_sc *gsc, double x)
1126 struct segment *new, *prev, *s;
/* scan for an existing entry; remember the insertion point */
1129 LIST_FOREACH(s, gsc, _next) {
1131 return (s); /* matching entry found */
1138 /* we have to create a new entry */
1139 if ((new = calloc(1, sizeof(struct segment))) == NULL)
/* new last entry, or splitting the dummy tail segment */
1143 if (x == INFINITY || s == NULL)
1145 else if (s->x == INFINITY)
1150 /* insert the new entry at the head of the list */
1153 LIST_INSERT_HEAD(gsc, new, _next);
1156 * the start point intersects with the segment pointed by
1157 * prev. divide prev into 2 segments
1159 if (x == INFINITY) {
/* y of the new entry continues prev's line */
1166 prev->d = x - prev->x;
1167 new->y = prev->d * prev->m + prev->y;
1170 LIST_INSERT_AFTER(prev, new, _next);
1175 /* add a segment to a generalized service curve */
/* adds slope m over [x, x+d) starting at height y, then propagates the accumulated offset to all later segments */
1177 gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
1179 struct segment *start, *end, *s;
1186 start = gsc_getentry(gsc, x)
1187 end = gsc_getentry(gsc, x2);
1188 if (start == NULL || end == NULL)
/* raise every segment inside [x, x2) by the sloped contribution */
1191 for (s = start; s != end; s = LIST_NEXT(s, _next)) {
1193 s->y += y + (s->x - x) * m;
/* segments after x2 are shifted by the final constant offset */
1196 end = gsc_getentry(gsc, INFINITY);
1197 for (; s != end; s = LIST_NEXT(s, _next)) {
1204 /* get y-projection of a service curve */
1206 sc_x2y(struct service_curve *sc, double x)
1210 if (x <= (double)sc->d)
1211 /* y belongs to the 1st segment */
1212 y = x * (double)sc->m1;
1214 /* y belongs to the 2nd segment */
1215 y = (double)sc->d * (double)sc->m1
1216 + (x - (double)sc->d) * (double)sc->m2;
#define R2S_BUFS	8
#define RATESTR_MAX	16

/*
 * rate2str: format a bandwidth (bits/sec) as a short human-readable
 * string such as "10Mb" or "1.50Kb".  Returns a pointer into a small
 * static ring of buffers, so up to R2S_BUFS results may be live at
 * once; not thread-safe.
 */
char *
rate2str(double rate)
{
	static char		 r2sbuf[R2S_BUFS][RATESTR_MAX];	/* ring buffer */
	static int		 idx = 0;
	static const char	 unit[] = " KMG";
	char			*buf;
	int			 i;

	/* take the next slot in the ring */
	buf = r2sbuf[idx++];
	if (idx == R2S_BUFS)
		idx = 0;

	/* scale into the largest unit that keeps the value below 1000 */
	for (i = 0; rate >= 1000 && i <= 3; i++)
		rate /= 1000;

	/* show two decimals only when there is a fractional part */
	if ((int)(rate * 100) % 100)
		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
	else
		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);

	return (buf);
}
1251 getifspeed(char *ifname)
1255 struct if_data ifrdat;
1257 s = get_query_socket();
1258 bzero(&ifr, sizeof(ifr));
1259 if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1260 sizeof(ifr.ifr_name))
1261 errx(1, "getifspeed: strlcpy");
1262 ifr.ifr_data = (caddr_t)&ifrdat;
1263 if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
1264 err(1, "SIOCGIFDATA");
1265 return ((u_int32_t)ifrdat.ifi_baudrate);
1269 getifmtu(char *ifname)
1274 s = get_query_socket();
1275 bzero(&ifr, sizeof(ifr));
1276 if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
1277 sizeof(ifr.ifr_name))
1278 errx(1, "getifmtu: strlcpy");
1279 if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
1283 err(1, "SIOCGIFMTU");
1285 if (ifr.ifr_mtu > 0)
1286 return (ifr.ifr_mtu);
1288 warnx("could not get mtu for %s, assuming 1500", ifname);
/*
 * eval_queue_opts: copy parsed per-scheduler options into pa->pq_u,
 * resolving every bandwidth specification against ref_bw (the parent
 * or interface bandwidth).  Returns an error count.
 */
1294 eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
1299 switch (pa->scheduler) {
1301 pa->pq_u.cbq_opts = opts->data.cbq_opts;
1304 pa->pq_u.priq_opts = opts->data.priq_opts;
1307 pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
/* each HFSC curve is copied only when the parser marked it used */
1308 if (opts->data.hfsc_opts.linkshare.used) {
1309 pa->pq_u.hfsc_opts.lssc_m1 =
1310 eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
1312 pa->pq_u.hfsc_opts.lssc_m2 =
1313 eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
1315 pa->pq_u.hfsc_opts.lssc_d =
1316 opts->data.hfsc_opts.linkshare.d;
1318 if (opts->data.hfsc_opts.realtime.used) {
1319 pa->pq_u.hfsc_opts.rtsc_m1 =
1320 eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
1322 pa->pq_u.hfsc_opts.rtsc_m2 =
1323 eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
1325 pa->pq_u.hfsc_opts.rtsc_d =
1326 opts->data.hfsc_opts.realtime.d;
1328 if (opts->data.hfsc_opts.upperlimit.used) {
1329 pa->pq_u.hfsc_opts.ulsc_m1 =
1330 eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
1332 pa->pq_u.hfsc_opts.ulsc_m2 =
1333 eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
1335 pa->pq_u.hfsc_opts.ulsc_d =
1336 opts->data.hfsc_opts.upperlimit.d;
1340 pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
1341 pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
1342 pa->pq_u.fairq_opts.hogs_m1 =
1343 eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);
1345 if (opts->data.fairq_opts.linkshare.used) {
1346 pa->pq_u.fairq_opts.lssc_m1 =
1347 eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
1349 pa->pq_u.fairq_opts.lssc_m2 =
1350 eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
1352 pa->pq_u.fairq_opts.lssc_d =
1353 opts->data.fairq_opts.linkshare.d;
1357 pa->pq_u.codel_opts.target = opts->data.codel_opts.target;
1358 pa->pq_u.codel_opts.interval = opts->data.codel_opts.interval;
1359 pa->pq_u.codel_opts.ecn = opts->data.codel_opts.ecn;
1362 warnx("eval_queue_opts: unknown scheduler type %u",
1372 * If absolute bandwidth if set, return the lesser of that value and the
1373 * reference bandwidth. Limiting to the reference bandwidth allows simple
1374 * limiting of configured bandwidth parameters for schedulers that are
1375 * 32-bit limited, as the root/interface bandwidth (top-level reference
1376 * bandwidth) will be properly limited in that case.
1378 * Otherwise, if the absolute bandwidth is not set, return given percentage
1379 * of reference bandwidth.
1382 eval_bwspec(struct node_queue_bw *bw, u_int64_t ref_bw)
1384 if (bw->bw_absolute > 0)
1385 return (MIN(bw->bw_absolute, ref_bw));
1387 if (bw->bw_percent > 0)
1388 return (ref_bw / 100 * bw->bw_percent);
/*
 * print_hfsc_sc: print one HFSC service curve ("name(m1 d) m2" or
 * "name m2"); percentage forms from the parsed config (sc) are
 * preferred over the resolved absolute rates when available.
 */
1394 print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
1395 const struct node_hfsc_sc *sc)
1397 printf(" %s", scname);
1401 if (sc != NULL && sc->m1.bw_percent > 0)
1402 printf("%u%%", sc->m1.bw_percent);
1404 printf("%s", rate2str((double)m1));
1408 if (sc != NULL && sc->m2.bw_percent > 0)
1409 printf(" %u%%", sc->m2.bw_percent);
1411 printf(" %s", rate2str((double)m2));
/*
 * print_fairq_sc: FAIRQ counterpart of print_hfsc_sc — print one
 * service curve, preferring the user's percentage form when present.
 */
1418 print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
1419 const struct node_fairq_sc *sc)
1421 printf(" %s", scname);
1425 if (sc != NULL && sc->m1.bw_percent > 0)
1426 printf("%u%%", sc->m1.bw_percent);
1428 printf("%s", rate2str((double)m1));
1432 if (sc != NULL && sc->m2.bw_percent > 0)
1433 printf(" %u%%", sc->m2.bw_percent);
1435 printf(" %s", rate2str((double)m2));