/* $KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $ */

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
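/*
 * editorial sketch (not from the original authors): how the upperlimit
 * bounds scheduling.  take a hypothetical class whose upperlimit curve
 * is a flat 1Mbps (m1 = 0, d = 0, m2 = 1000000) and which has sent
 * cl_total = 125000 bytes since going active at t = 0.  its fit-time
 * is the x-projection of the upperlimit curve:
 *
 *	cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total)
 *	       = 125000 bytes / 125000 bytes-per-sec = 1 second
 *
 * until the current time reaches 1 second, actlist_firstfit() skips
 * this class even if its virtual time would otherwise select it.
 */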
#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#endif /* __FreeBSD__ || __NetBSD__ */

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */

#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>

#include <netpfil/pf/pf.h>
#include <netpfil/pf/pf_altq.h>
#include <netpfil/pf/pf_mtag.h>
#include <altq/altq.h>
#include <altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, u_int64_t);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *hfsc_get_mindl(struct hfsc_if *, u_int64_t);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
    u_int64_t);

static __inline u_int64_t	seg_x2y(u_int64_t, u_int64_t);
static __inline u_int64_t	seg_y2x(u_int64_t, u_int64_t);
static __inline u_int64_t	m2sm(u_int);
static __inline u_int64_t	m2ism(u_int);
static __inline u_int64_t	d2dx(u_int);
static u_int	sm2m(u_int64_t);
static u_int	dx2d(u_int64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);
static u_int64_t	rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t	rtsc_x2y(struct runtime_sc *, u_int64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);

static void	get_class_stats(struct hfsc_classstats *,
    struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);

#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static int	hfsc_detach(struct hfsc_if *);
static int	hfsc_class_modify(struct hfsc_class *, struct service_curve *,
    struct service_curve *, struct service_curve *);

static int	hfsccmd_if_attach(struct hfsc_attach *);
static int	hfsccmd_if_detach(struct hfsc_interface *);
static int	hfsccmd_add_class(struct hfsc_add_class *);
static int	hfsccmd_delete_class(struct hfsc_delete_class *);
static int	hfsccmd_modify_class(struct hfsc_modify_class *);
static int	hfsccmd_add_filter(struct hfsc_add_filter *);
static int	hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int	hfsccmd_class_stats(struct hfsc_class_stats *);
#endif /* ALTQ3_COMPAT */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */
int
hfsc_pfattach(struct pf_altq *a)
{
	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);

	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);

int
hfsc_add_altq(struct pf_altq *a)
{
	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_NOWAIT | M_ZERO);

	TAILQ_INIT(&hif->hif_eligible);
	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */

int
hfsc_remove_altq(struct pf_altq *a)
{
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_class *cl;
	struct hfsc_classstats stats;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	get_class_stats(&stats, cl);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

#ifdef ALTQ3_COMPAT
	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

static struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_CODEL
	if (flags & HFCF_CODEL) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: CODEL not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	TAILQ_INIT(&cl->cl_actc);

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	cl->cl_flags = flags;

#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */
#ifdef ALTQ_CODEL
	if (flags & HFCF_CODEL) {
		cl->cl_codel = codel_alloc(5, 100, 0);
		if (cl->cl_codel != NULL)
			qtype(cl->cl_q) = Q_CODEL;
	}
#endif

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	IFQ_LOCK(hif->hif_ifq);
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
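	/*
	 * editorial example (assuming HFSC_MAX_CLASSES is 64, as in
	 * altq_hfsc.h): qid 70 first probes slot 70 % 64 == 6; only if
	 * slot 6 is already taken does the linear scan pick the first
	 * free slot.  clh_to_clp() probes the same slot first, so a
	 * lookup normally costs a single table access.
	 */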
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			IFQ_UNLOCK(hif->hif_ifq);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	IFQ_UNLOCK(hif->hif_ifq);

	return (cl);

 err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
#ifdef ALTQ_CODEL
		if (q_is_codel(cl->cl_q))
			codel_destroy(cl->cl_codel);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i;

	if (is_a_parent_class(cl))
		return (EBUSY);

	IFQ_LOCK(cl->cl_hif->hif_ifq);

#ifdef ALTQ3_COMPAT
	/* delete filters referencing to this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
#ifdef ALTQ_CODEL
		if (q_is_codel(cl->cl_q))
			codel_destroy(cl->cl_codel);
#endif
	}

	IFQ_LOCK(cl->cl_hif->hif_ifq);
	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct pf_mtag *t;
	int len;

	IFQ_LOCK_ASSERT(ifq);

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = pf_find_mtag(m)) != NULL)
		cl = clh_to_clp(hif, t->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
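/*
 * editorial sketch of the poll/remove contract; driver_has_room() is a
 * hypothetical driver check, not part of ALTQ:
 *
 *	m = hfsc_dequeue(ifq, ALTDQ_POLL);	-- peek; class is cached
 *	if (m != NULL && driver_has_room(sc, m))
 *		m = hfsc_dequeue(ifq, ALTDQ_REMOVE);	-- same mbuf, dequeued
 *
 * hif_pollcache below is what makes the REMOVE following a POLL commit
 * to the class chosen at poll time.
 */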
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	IFQ_LOCK_ASSERT(ifq);

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = hfsc_get_mindl(hif, cur_time)) != NULL) {
			realtime = 1;
		} else {
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					printf("%d fit but none found\n", fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			return (hfsc_pollq(cl));
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
		    m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		return codel_addq(cl->cl_codel, cl->cl_q, m);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		return codel_getq(cl->cl_codel, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}
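/*
 * editorial note: hfsc_purgeq() above depends on this convention; after
 * draining the queue it calls update_vf(cl, 0, 0), and since the queue
 * is then empty, update_vf() takes its go_passive path and removes the
 * class from each ancestor's active-children list.
 */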
static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
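	/*
	 * editorial note on the rule below: the eligible curve must not
	 * let a class become eligible later than its deadline.  a
	 * concave curve (sm1 > sm2) can serve as its own eligible
	 * curve, hence the plain copy.  for a convex curve (sm1 <= sm2)
	 * the slow first segment would violate that bound, so dx and dy
	 * are zeroed, leaving the single line of slope sm2 through the
	 * update point, which dominates the two-segment deadline curve.
	 */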
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;

	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
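		/*
		 * editorial example: if this class computes vt = 100
		 * from its virtual curve while a sibling has already
		 * advanced the parent's cvtmin to 120 (this class was
		 * skipped earlier because its fit-time had not arrived),
		 * the code below adds 20 to cl_vtadj and clamps cl_vt to
		 * 120, so the class does not later monopolize the link
		 * to make up virtual time it was never allowed to use.
		 */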
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
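			/*
			 * editorial example: with machclk_freq = 1GHz
			 * and hz = 1000, machclk_per_tick is 10^6
			 * counts.  if the link sat idle and cl_myf is
			 * now 5 * 10^6 counts in the past, delta below
			 * advances both cl_myf and cl_myfadj by that
			 * amount: the class may send right away, but it
			 * gets no retroactive credit for the idle time.
			 */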
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(&cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */

/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */
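/*
 * editorial note: because the list is kept sorted by cl_e,
 * hfsc_get_mindl() below can stop at the first entry whose eligible
 * time is still in the future; only the eligible prefix of the list is
 * ever scanned.
 */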
static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&hif->hif_eligible, elighead)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;

	TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&hif->hif_eligible, elighead);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
hfsc_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
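/*
 * editorial note: keeping each parent's backlogged children sorted by
 * cl_vt makes the link-sharing pass cheap; actlist_firstfit() simply
 * walks the list in vt order and returns the first child whose
 * fit-time cl_f has already arrived.
 */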
static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps      10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6     125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6     250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6     625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000       8000       800        80         8
 *  ism(500MHz) 40000       4000       400        40         4
 *  ism(200MHz) 16000       1600       160        16         1.6
 */
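/*
 * editorial worked example: a 10Mbps slope on a 500MHz clock is
 * 1.25e6 bytes/sec / 5e8 counts/sec = 2500e-6 bytes/count (the table
 * above).  with the shifts defined below, m2sm()/m2ism() turn this
 * into integers:
 *
 *	sm  = (10000000ULL << SM_SHIFT) / 8 / 500000000  = 41943
 *	ism = (500000000ULL << ISM_SHIFT) * 8 / 10000000 = 409600
 *
 * i.e. about 2500e-6 * 2^24 and 400 * 2^10, keeping well over the
 * three effective decimal digits the table calls for.
 */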
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)

static __inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
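	/*
	 * editorial check of the split below: write x as
	 * (x_hi << SM_SHIFT) + x_lo.  then
	 *
	 *	(x * sm) >> SM_SHIFT == x_hi * sm + ((x_lo * sm) >> SM_SHIFT)
	 *
	 * the high product is exact and the low product truncates just
	 * as the unsplit expression would; since x_lo < 2^24 and sm is
	 * small (about 2^15 for 10Mbps on a 500MHz clock), neither
	 * partial product can overflow 64 bits even for large x.
	 */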
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}

static __inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}

static __inline u_int64_t
m2sm(u_int m)
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline u_int64_t
m2ism(u_int m)
{
	u_int64_t ism;

	ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(u_int64_t sm)
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(u_int64_t dx)
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection (time) of the runtime service curve for
 * the given y-value (amount of service), i.e. the inverse of rtsc_x2y()
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
	u_int64_t x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
	u_int64_t y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		*rtsc = *isc;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
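/*
 * editorial derivation of the dx used in rtsc_min(): the two concave
 * curves cross where the old curve continued from height y1 with slope
 * sm2 meets the new curve rising from height y with slope sm1:
 *
 *	y + seg_x2y(dx, sm1) == y1 + seg_x2y(dx, sm2)
 *	dx * (sm1 - sm2) >> SM_SHIFT == y1 - y
 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 *
 * matching the "reverse function of seg_x2y()" comment above.
 */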
static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_CODEL
	if (q_is_codel(cl->cl_q))
		codel_getstats(cl->cl_codel, &sp->codel);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
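	/*
	 * editorial example: hfsccmd_add_class() below hands out free
	 * slot indices as class handles, so handle 5 normally lives in
	 * hif_class_tbl[5 % HFSC_MAX_CLASSES] and the first probe hits;
	 * the linear scan is only needed for classes placed by the
	 * fallback path in hfsc_class_create().
	 */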
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#ifdef ALTQ3_COMPAT
static struct hfsc_if *
hfsc_attach(ifq, bandwidth)
	struct ifaltq *ifq;
	u_int bandwidth;
{
	struct hfsc_if *hif;

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
	if (hif == NULL)
		return (NULL);
	bzero(hif, sizeof(struct hfsc_if));

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return (NULL);
	}

	hif->hif_ifq = ifq;

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static int
hfsc_detach(hif)
	struct hfsc_if *hif;
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);

	return (0);
}

static int
hfsc_class_modify(cl, rsc, fsc, usc)
	struct hfsc_class *cl;
	struct service_curve *rsc, *fsc, *usc;
{
	struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
	u_int64_t cur_time;

	rsc_tmp = fsc_tmp = usc_tmp = NULL;
	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		rsc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		fsc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (fsc_tmp == NULL) {
			if (rsc_tmp != NULL)
				free(rsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
	    cl->cl_usc == NULL) {
		usc_tmp = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_WAITOK);
		if (usc_tmp == NULL) {
			if (rsc_tmp != NULL)
				free(rsc_tmp, M_DEVBUF);
			if (fsc_tmp != NULL)
				free(fsc_tmp, M_DEVBUF);
			return (ENOMEM);
		}
	}

	cur_time = read_machclk();
	IFQ_LOCK(cl->cl_hif->hif_ifq);

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
			    cl->cl_cumul);
			cl->cl_eligible = cl->cl_deadline;
			if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
				cl->cl_eligible.dx = 0;
				cl->cl_eligible.dy = 0;
			}
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
			    cl->cl_total);
		}
	}

	if (usc != NULL) {
		if (usc->m1 == 0 && usc->m2 == 0) {
			if (cl->cl_usc != NULL) {
				free(cl->cl_usc, M_DEVBUF);
				cl->cl_usc = NULL;
			}
		} else {
			if (cl->cl_usc == NULL)
				cl->cl_usc = usc_tmp;
			sc2isc(usc, cl->cl_usc);
			rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
			    cl->cl_total);
		}
	}

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL)
			update_ed(cl, m_pktlen(qhead(cl->cl_q)));
		if (cl->cl_fsc != NULL)
			update_vf(cl, 0, cur_time);
		/* is this enough? */
	}

	IFQ_UNLOCK(cl->cl_hif->hif_ifq);

	return (0);
}

/*
 * hfsc device interface
 */
int
hfscopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no cpu clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	struct hfsc_if *hif;
	int err, error = 0;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		err = altq_detach(hif->hif_ifq);
		if (err == 0)
			err = hfsc_detach(hif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
hfscioctl(dev, cmd, addr, flag, p)
	dev_t dev;
	ioctlcmd_t cmd;
	caddr_t addr;
	int flag;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 700000)
		if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
			return (error);
#elif (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
		    ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {
		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#ifdef ALTQ_DEBUG
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}

static int
hfsccmd_if_attach(ap)
	struct hfsc_attach *ap;
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request,
	    &hif->hif_classifier, acc_classify)) != 0)
		(void)hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(ap)
	struct hfsc_interface *ap;
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	return hfsc_detach(hif);
}

static int
hfsccmd_add_class(ap)
	struct hfsc_add_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	int i;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
		return (EINVAL);

	/* assign a class handle (use a free slot number for now) */
	for (i = 1; i < HFSC_MAX_CLASSES; i++)
		if (hif->hif_class_tbl[i] == NULL)
			break;
	if (i == HFSC_MAX_CLASSES)
		return (EBUSY);

	if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
	    parent, ap->qlimit, ap->flags, i)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = i;

	return (0);
}

static int
hfsccmd_delete_class(ap)
	struct hfsc_delete_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(ap)
	struct hfsc_modify_class *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;
	struct service_curve *usc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;
	if (ap->sctype & HFSC_UPPERLIMITSC)
		usc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc, usc);
}

static int
hfsccmd_add_filter(ap)
	struct hfsc_add_filter *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#ifdef ALTQ_DEBUG
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
	    cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(ap)
	struct hfsc_delete_filter *ap;
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
	    ap->filter_handle);
}

static int
hfsccmd_class_stats(ap)
	struct hfsc_class_stats *ap;
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats, *usp;
	int n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->machclk_freq = machclk_freq;
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	    cl = hfsc_nextclass(cl), n++)
		;

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
		get_class_stats(&stats, cl);

		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
		    sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_HFSC */