2 /* $KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $ */
5 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
7 * Permission to use, copy, modify, and distribute this software and
8 * its documentation is hereby granted (including for commercial or
9 * for-profit use), provided that both the copyright notice and this
10 * permission notice appear in all copies of the software, derivative
11 * works, or modified versions, and any portions thereof.
13 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
14 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
15 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
25 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28 * Carnegie Mellon encourages (but does not require) users of this
29 * software to return any improvements or extensions that they make,
30 * and to grant Carnegie Mellon the rights to redistribute these
31 * changes without encumbrance.
34 * H-FSC is described in Proceedings of SIGCOMM'97,
35 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
36 * Real-Time and Priority Service"
37 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
39 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
40 * when a class has an upperlimit, the fit-time is computed from the
41 * upperlimit service curve. the link-sharing scheduler does not schedule
42 * a class whose fit-time exceeds the current time.
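 *
 * (editorial note, not part of the original credits: the "fit-time"
 * mentioned above is the cl_f value computed from the upper-limit
 * service curve; actlist_firstfit() later in this file only returns
 * classes with cl_f <= cur_time, which is how a class that has hit
 * its upperlimit is skipped by the link-sharing scheduler.)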
45 #if defined(__FreeBSD__) || defined(__NetBSD__)
49 #include "opt_inet6.h"
51 #endif /* __FreeBSD__ || __NetBSD__ */
53 #ifdef ALTQ_HFSC /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */
55 #include <sys/param.h>
56 #include <sys/malloc.h>
58 #include <sys/socket.h>
59 #include <sys/systm.h>
60 #include <sys/errno.h>
61 #include <sys/queue.h>
62 #if 1 /* ALTQ3_COMPAT */
63 #include <sys/sockio.h>
65 #include <sys/kernel.h>
66 #endif /* ALTQ3_COMPAT */
69 #include <netinet/in.h>
71 #include <net/pfvar.h>
72 #include <altq/altq.h>
73 #include <altq/altq_hfsc.h>
75 #include <altq/altq_conf.h>
81 static int hfsc_clear_interface(struct hfsc_if *);
82 static int hfsc_request(struct ifaltq *, int, void *);
83 static void hfsc_purge(struct hfsc_if *);
84 static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
85 struct service_curve *, struct service_curve *, struct service_curve *,
86 struct hfsc_class *, int, int, int);
87 static int hfsc_class_destroy(struct hfsc_class *);
88 static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
89 static int hfsc_enqueue(struct ifaltq *, struct mbuf *,
90 struct altq_pktattr *);
91 static struct mbuf *hfsc_dequeue(struct ifaltq *, int);
93 static int hfsc_addq(struct hfsc_class *, struct mbuf *);
94 static struct mbuf *hfsc_getq(struct hfsc_class *);
95 static struct mbuf *hfsc_pollq(struct hfsc_class *);
96 static void hfsc_purgeq(struct hfsc_class *);
98 static void update_cfmin(struct hfsc_class *);
99 static void set_active(struct hfsc_class *, int);
100 static void set_passive(struct hfsc_class *);
102 static void init_ed(struct hfsc_class *, int);
103 static void update_ed(struct hfsc_class *, int);
104 static void update_d(struct hfsc_class *, int);
105 static void init_vf(struct hfsc_class *, int);
106 static void update_vf(struct hfsc_class *, int, u_int64_t);
107 static ellist_t *ellist_alloc(void);
108 static void ellist_destroy(ellist_t *);
109 static void ellist_insert(struct hfsc_class *);
110 static void ellist_remove(struct hfsc_class *);
111 static void ellist_update(struct hfsc_class *);
112 struct hfsc_class *ellist_get_mindl(ellist_t *, u_int64_t);
113 static actlist_t *actlist_alloc(void);
114 static void actlist_destroy(actlist_t *);
115 static void actlist_insert(struct hfsc_class *);
116 static void actlist_remove(struct hfsc_class *);
117 static void actlist_update(struct hfsc_class *);
119 static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
122 static __inline u_int64_t seg_x2y(u_int64_t, u_int64_t);
123 static __inline u_int64_t seg_y2x(u_int64_t, u_int64_t);
124 static __inline u_int64_t m2sm(u_int);
125 static __inline u_int64_t m2ism(u_int);
126 static __inline u_int64_t d2dx(u_int);
127 static u_int sm2m(u_int64_t);
128 static u_int dx2d(u_int64_t);
130 static void sc2isc(struct service_curve *, struct internal_sc *);
131 static void rtsc_init(struct runtime_sc *, struct internal_sc *,
132 u_int64_t, u_int64_t);
133 static u_int64_t rtsc_y2x(struct runtime_sc *, u_int64_t);
134 static u_int64_t rtsc_x2y(struct runtime_sc *, u_int64_t);
135 static void rtsc_min(struct runtime_sc *, struct internal_sc *,
136 u_int64_t, u_int64_t);
138 static void get_class_stats(struct hfsc_classstats *,
139 struct hfsc_class *);
140 static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);
144 static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
145 static int hfsc_detach(struct hfsc_if *);
146 static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
147 struct service_curve *, struct service_curve *);
149 static int hfsccmd_if_attach(struct hfsc_attach *);
150 static int hfsccmd_if_detach(struct hfsc_interface *);
151 static int hfsccmd_add_class(struct hfsc_add_class *);
152 static int hfsccmd_delete_class(struct hfsc_delete_class *);
153 static int hfsccmd_modify_class(struct hfsc_modify_class *);
154 static int hfsccmd_add_filter(struct hfsc_add_filter *);
155 static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
156 static int hfsccmd_class_stats(struct hfsc_class_stats *);
159 #endif /* ALTQ3_COMPAT */
164 #define is_a_parent_class(cl) ((cl)->cl_children != NULL)
166 #define HT_INFINITY 0xffffffffffffffffLL /* infinite time value */
169 /* hif_list keeps all hfsc_if's allocated. */
170 static struct hfsc_if *hif_list = NULL;
171 #endif /* ALTQ3_COMPAT */
174 hfsc_pfattach(struct pf_altq *a)
179 if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
186 error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
187 hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
193 hfsc_add_altq(struct pf_altq *a)
198 if ((ifp = ifunit(a->ifname)) == NULL)
200 if (!ALTQ_IS_READY(&ifp->if_snd))
203 hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
206 bzero(hif, sizeof(struct hfsc_if));
208 hif->hif_eligible = ellist_alloc();
209 if (hif->hif_eligible == NULL) {
214 hif->hif_ifq = &ifp->if_snd;
216 /* keep the state in pf_altq */
223 hfsc_remove_altq(struct pf_altq *a)
227 if ((hif = a->altq_disc) == NULL)
231 (void)hfsc_clear_interface(hif);
232 (void)hfsc_class_destroy(hif->hif_rootclass);
234 ellist_destroy(hif->hif_eligible);
242 hfsc_add_queue(struct pf_altq *a)
245 struct hfsc_class *cl, *parent;
246 struct hfsc_opts *opts;
247 struct service_curve rtsc, lssc, ulsc;
249 if ((hif = a->altq_disc) == NULL)
252 opts = &a->pq_u.hfsc_opts;
254 if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
255 hif->hif_rootclass == NULL)
257 else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
263 if (clh_to_clp(hif, a->qid) != NULL)
266 rtsc.m1 = opts->rtsc_m1;
267 rtsc.d = opts->rtsc_d;
268 rtsc.m2 = opts->rtsc_m2;
269 lssc.m1 = opts->lssc_m1;
270 lssc.d = opts->lssc_d;
271 lssc.m2 = opts->lssc_m2;
272 ulsc.m1 = opts->ulsc_m1;
273 ulsc.d = opts->ulsc_d;
274 ulsc.m2 = opts->ulsc_m2;
276 cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
277 parent, a->qlimit, opts->flags, a->qid);
285 hfsc_remove_queue(struct pf_altq *a)
288 struct hfsc_class *cl;
290 if ((hif = a->altq_disc) == NULL)
293 if ((cl = clh_to_clp(hif, a->qid)) == NULL)
296 return (hfsc_class_destroy(cl));
300 hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
303 struct hfsc_class *cl;
304 struct hfsc_classstats stats;
307 if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
310 if ((cl = clh_to_clp(hif, a->qid)) == NULL)
313 if (*nbytes < sizeof(stats))
316 get_class_stats(&stats, cl);
318 if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
320 *nbytes = sizeof(stats);
325 * bring the interface back to the initial state by discarding
326 * all the filters and classes except the root class.
329 hfsc_clear_interface(struct hfsc_if *hif)
331 struct hfsc_class *cl;
334 /* free the filters for this interface */
335 acc_discard_filters(&hif->hif_classifier, NULL, 1);
338 /* clear out the classes */
339 while (hif->hif_rootclass != NULL &&
340 (cl = hif->hif_rootclass->cl_children) != NULL) {
342 * remove the first leaf class found in the hierarchy
345 for (; cl != NULL; cl = hfsc_nextclass(cl)) {
346 if (!is_a_parent_class(cl)) {
347 (void)hfsc_class_destroy(cl);
357 hfsc_request(struct ifaltq *ifq, int req, void *arg)
359 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
361 IFQ_LOCK_ASSERT(ifq);
371 /* discard all the queued packets on the interface */
373 hfsc_purge(struct hfsc_if *hif)
375 struct hfsc_class *cl;
377 for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
378 if (!qempty(cl->cl_q))
380 if (ALTQ_IS_ENABLED(hif->hif_ifq))
381 hif->hif_ifq->ifq_len = 0;
385 hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
386 struct service_curve *fsc, struct service_curve *usc,
387 struct hfsc_class *parent, int qlimit, int flags, int qid)
389 struct hfsc_class *cl, *p;
392 if (hif->hif_classes >= HFSC_MAX_CLASSES)
396 if (flags & HFCF_RED) {
398 printf("hfsc_class_create: RED not configured for HFSC!\n");
404 cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_WAITOK);
407 bzero(cl, sizeof(struct hfsc_class));
409 cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK);
410 if (cl->cl_q == NULL)
412 bzero(cl->cl_q, sizeof(class_queue_t));
414 cl->cl_actc = actlist_alloc();
415 if (cl->cl_actc == NULL)
419 qlimit = 50; /* use default */
420 qlimit(cl->cl_q) = qlimit;
421 qtype(cl->cl_q) = Q_DROPTAIL;
423 cl->cl_flags = flags;
425 if (flags & (HFCF_RED|HFCF_RIO)) {
426 int red_flags, red_pkttime;
430 if (rsc != NULL && rsc->m2 > m2)
432 if (fsc != NULL && fsc->m2 > m2)
434 if (usc != NULL && usc->m2 > m2)
438 if (flags & HFCF_ECN)
439 red_flags |= REDF_ECN;
441 if (flags & HFCF_CLEARDSCP)
442 red_flags |= RIOF_CLEARDSCP;
445 red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
447 red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
448 * 1000 * 1000 * 1000 / (m2 / 8);
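/*
 * worked example (editorial addition, not in the original sources):
 * with a 1500-byte MTU and m2 = 10 Mbps, the average per-packet
 * transmission time used to seed RED/RIO works out to
 *     1500 * 1000 * 1000 * 1000 / (10000000 / 8) = 1200000 nsec
 * i.e. about 1.2 msec per full-sized packet.
 */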
449 if (flags & HFCF_RED) {
450 cl->cl_red = red_alloc(0, 0,
451 qlimit(cl->cl_q) * 10/100,
452 qlimit(cl->cl_q) * 30/100,
453 red_flags, red_pkttime);
454 if (cl->cl_red != NULL)
455 qtype(cl->cl_q) = Q_RED;
459 cl->cl_red = (red_t *)rio_alloc(0, NULL,
460 red_flags, red_pkttime);
461 if (cl->cl_red != NULL)
462 qtype(cl->cl_q) = Q_RIO;
466 #endif /* ALTQ_RED */
468 if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
469 cl->cl_rsc = malloc(sizeof(struct internal_sc),
471 if (cl->cl_rsc == NULL)
473 sc2isc(rsc, cl->cl_rsc);
474 rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
475 rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
477 if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
478 cl->cl_fsc = malloc(sizeof(struct internal_sc),
480 if (cl->cl_fsc == NULL)
482 sc2isc(fsc, cl->cl_fsc);
483 rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
485 if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
486 cl->cl_usc = malloc(sizeof(struct internal_sc),
488 if (cl->cl_usc == NULL)
490 sc2isc(usc, cl->cl_usc);
491 rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
494 cl->cl_id = hif->hif_classid++;
497 cl->cl_parent = parent;
504 IFQ_LOCK(hif->hif_ifq);
508 * find a free slot in the class table. if the slot matching
509 * the lower bits of qid is free, use this slot. otherwise,
510 * use the first free slot.
512 i = qid % HFSC_MAX_CLASSES;
513 if (hif->hif_class_tbl[i] == NULL)
514 hif->hif_class_tbl[i] = cl;
516 for (i = 0; i < HFSC_MAX_CLASSES; i++)
517 if (hif->hif_class_tbl[i] == NULL) {
518 hif->hif_class_tbl[i] = cl;
521 if (i == HFSC_MAX_CLASSES) {
522 IFQ_UNLOCK(hif->hif_ifq);
528 if (flags & HFCF_DEFAULTCLASS)
529 hif->hif_defaultclass = cl;
531 if (parent == NULL) {
532 /* this is root class */
533 hif->hif_rootclass = cl;
535 /* add this class to the children list of the parent */
536 if ((p = parent->cl_children) == NULL)
537 parent->cl_children = cl;
539 while (p->cl_siblings != NULL)
544 IFQ_UNLOCK(hif->hif_ifq);
550 if (cl->cl_actc != NULL)
551 actlist_destroy(cl->cl_actc);
552 if (cl->cl_red != NULL) {
554 if (q_is_rio(cl->cl_q))
555 rio_destroy((rio_t *)cl->cl_red);
558 if (q_is_red(cl->cl_q))
559 red_destroy(cl->cl_red);
562 if (cl->cl_fsc != NULL)
563 free(cl->cl_fsc, M_DEVBUF);
564 if (cl->cl_rsc != NULL)
565 free(cl->cl_rsc, M_DEVBUF);
566 if (cl->cl_usc != NULL)
567 free(cl->cl_usc, M_DEVBUF);
568 if (cl->cl_q != NULL)
569 free(cl->cl_q, M_DEVBUF);
575 hfsc_class_destroy(struct hfsc_class *cl)
582 if (is_a_parent_class(cl))
590 IFQ_LOCK(cl->cl_hif->hif_ifq);
593 /* delete filters referencing this class */
594 acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
595 #endif /* ALTQ3_COMPAT */
597 if (!qempty(cl->cl_q))
600 if (cl->cl_parent == NULL) {
601 /* this is root class */
603 struct hfsc_class *p = cl->cl_parent->cl_children;
606 cl->cl_parent->cl_children = cl->cl_siblings;
608 if (p->cl_siblings == cl) {
609 p->cl_siblings = cl->cl_siblings;
612 } while ((p = p->cl_siblings) != NULL);
616 for (i = 0; i < HFSC_MAX_CLASSES; i++)
617 if (cl->cl_hif->hif_class_tbl[i] == cl) {
618 cl->cl_hif->hif_class_tbl[i] = NULL;
622 cl->cl_hif->hif_classes--;
623 IFQ_UNLOCK(cl->cl_hif->hif_ifq);
626 actlist_destroy(cl->cl_actc);
628 if (cl->cl_red != NULL) {
630 if (q_is_rio(cl->cl_q))
631 rio_destroy((rio_t *)cl->cl_red);
634 if (q_is_red(cl->cl_q))
635 red_destroy(cl->cl_red);
639 IFQ_LOCK(cl->cl_hif->hif_ifq);
640 if (cl == cl->cl_hif->hif_rootclass)
641 cl->cl_hif->hif_rootclass = NULL;
642 if (cl == cl->cl_hif->hif_defaultclass)
643 cl->cl_hif->hif_defaultclass = NULL;
644 IFQ_UNLOCK(cl->cl_hif->hif_ifq);
646 if (cl->cl_usc != NULL)
647 free(cl->cl_usc, M_DEVBUF);
648 if (cl->cl_fsc != NULL)
649 free(cl->cl_fsc, M_DEVBUF);
650 if (cl->cl_rsc != NULL)
651 free(cl->cl_rsc, M_DEVBUF);
652 free(cl->cl_q, M_DEVBUF);
659 * hfsc_nextclass returns the next class in the tree.
661 * for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
664 static struct hfsc_class *
665 hfsc_nextclass(struct hfsc_class *cl)
667 if (cl->cl_children != NULL)
668 cl = cl->cl_children;
669 else if (cl->cl_siblings != NULL)
670 cl = cl->cl_siblings;
672 while ((cl = cl->cl_parent) != NULL)
673 if (cl->cl_siblings) {
674 cl = cl->cl_siblings;
683 * hfsc_enqueue is an enqueue function to be registered to
684 * (*altq_enqueue) in struct ifaltq.
687 hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
689 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
690 struct hfsc_class *cl;
694 IFQ_LOCK_ASSERT(ifq);
696 /* grab class set by classifier */
697 if ((m->m_flags & M_PKTHDR) == 0) {
698 /* should not happen */
699 printf("altq: packet for %s does not have pkthdr\n",
700 ifq->altq_ifp->if_xname);
705 if ((t = pf_find_mtag(m)) != NULL)
706 cl = clh_to_clp(hif, t->qid);
708 else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
709 cl = pktattr->pattr_class;
711 if (cl == NULL || is_a_parent_class(cl)) {
712 cl = hif->hif_defaultclass;
720 cl->cl_pktattr = pktattr; /* save proto hdr used by ECN */
723 cl->cl_pktattr = NULL;
725 if (hfsc_addq(cl, m) != 0) {
726 /* drop occurred. mbuf was freed in hfsc_addq. */
727 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
731 cl->cl_hif->hif_packets++;
733 /* successfully queued. */
734 if (qlen(cl->cl_q) == 1)
735 set_active(cl, m_pktlen(m));
741 * hfsc_dequeue is a dequeue function to be registered to
742 * (*altq_dequeue) in struct ifaltq.
744 * note: ALTDQ_POLL returns the next packet without removing the packet
745 * from the queue. ALTDQ_REMOVE is a normal dequeue operation.
746 * ALTDQ_REMOVE must return the same packet if called immediately after ALTDQ_POLL.
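 *
 * (editorial sketch of that contract, not in the original comment:
 * a caller first issues ALTDQ_POLL to peek at the head-of-line packet
 * and then ALTDQ_REMOVE to take it.  hfsc_dequeue() below remembers
 * the class chosen during the poll in hif_pollcache, so the following
 * ALTDQ_REMOVE dequeues from that same class and hands back the same
 * mbuf.)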
750 hfsc_dequeue(struct ifaltq *ifq, int op)
752 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
753 struct hfsc_class *cl;
759 IFQ_LOCK_ASSERT(ifq);
761 if (hif->hif_packets == 0)
762 /* no packet in the tree */
765 cur_time = read_machclk();
767 if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
769 cl = hif->hif_pollcache;
770 hif->hif_pollcache = NULL;
771 /* check if the class was scheduled by real-time criteria */
772 if (cl->cl_rsc != NULL)
773 realtime = (cl->cl_e <= cur_time);
776 * if there are eligible classes, use real-time criteria.
777 * find the class with the minimum deadline among
778 * the eligible classes.
780 if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
788 * use link-sharing criteria
789 * get the class with the minimum vt in the hierarchy
791 cl = hif->hif_rootclass;
792 while (is_a_parent_class(cl)) {
794 cl = actlist_firstfit(cl, cur_time);
798 printf("%d fit but none found\n", fits);
803 * update parent's cl_cvtmin.
804 * don't update if the new vt is smaller.
806 if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
807 cl->cl_parent->cl_cvtmin = cl->cl_vt;
814 if (op == ALTDQ_POLL) {
815 hif->hif_pollcache = cl;
823 panic("hfsc_dequeue:");
825 cl->cl_hif->hif_packets--;
827 PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);
829 update_vf(cl, len, cur_time);
833 if (!qempty(cl->cl_q)) {
834 if (cl->cl_rsc != NULL) {
836 next_len = m_pktlen(qhead(cl->cl_q));
839 update_ed(cl, next_len);
841 update_d(cl, next_len);
844 /* the class becomes passive */
852 hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
856 if (q_is_rio(cl->cl_q))
857 return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
861 if (q_is_red(cl->cl_q))
862 return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
864 if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
869 if (cl->cl_flags & HFCF_CLEARDSCP)
870 write_dsfield(m, cl->cl_pktattr, 0);
878 hfsc_getq(struct hfsc_class *cl)
881 if (q_is_rio(cl->cl_q))
882 return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
885 if (q_is_red(cl->cl_q))
886 return red_getq(cl->cl_red, cl->cl_q);
888 return _getq(cl->cl_q);
892 hfsc_pollq(struct hfsc_class *cl)
894 return qhead(cl->cl_q);
898 hfsc_purgeq(struct hfsc_class *cl)
902 if (qempty(cl->cl_q))
905 while ((m = _getq(cl->cl_q)) != NULL) {
906 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
908 cl->cl_hif->hif_packets--;
909 IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
911 ASSERT(qlen(cl->cl_q) == 0);
913 update_vf(cl, 0, 0); /* remove cl from the actlist */
918 set_active(struct hfsc_class *cl, int len)
920 if (cl->cl_rsc != NULL)
922 if (cl->cl_fsc != NULL)
925 cl->cl_stats.period++;
929 set_passive(struct hfsc_class *cl)
931 if (cl->cl_rsc != NULL)
935 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
936 * needs to be called explicitly to remove a class from actlist
941 init_ed(struct hfsc_class *cl, int next_len)
945 cur_time = read_machclk();
947 /* update the deadline curve */
948 rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);
951 * update the eligible curve.
952 * for concave, it is equal to the deadline curve.
953 * for convex, it is a linear curve with slope m2.
955 cl->cl_eligible = cl->cl_deadline;
956 if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
957 cl->cl_eligible.dx = 0;
958 cl->cl_eligible.dy = 0;
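/*
 * illustrative note (editorial addition): sm1 <= sm2 corresponds to a
 * convex external curve (m1 <= m2).  zeroing dx/dy above reduces the
 * eligible curve to a single segment of slope m2 through the same
 * origin, so for a given amount of cumulative service the eligible
 * time never falls later than the deadline time.  a concave curve
 * (m1 > m2, a burst followed by a lower sustained rate) keeps
 * eligible == deadline.
 */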
961 /* compute e and d */
962 cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
963 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
969 update_ed(struct hfsc_class *cl, int next_len)
971 cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
972 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
978 update_d(struct hfsc_class *cl, int next_len)
980 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
984 init_vf(struct hfsc_class *cl, int len)
986 struct hfsc_class *max_cl, *p;
987 u_int64_t vt, f, cur_time;
992 for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
994 if (go_active && cl->cl_nactive++ == 0)
1000 max_cl = actlist_last(cl->cl_parent->cl_actc);
1001 if (max_cl != NULL) {
1003 * set vt to the average of the min and max
1004 * classes. if the parent's period didn't
1005 * change, don't decrease vt of the class.
1008 if (cl->cl_parent->cl_cvtmin != 0)
1009 vt = (cl->cl_parent->cl_cvtmin + vt)/2;
1011 if (cl->cl_parent->cl_vtperiod !=
1012 cl->cl_parentperiod || vt > cl->cl_vt)
1016 * first child for a new parent backlog period.
1017 * add parent's cvtmax to vtoff of children
1018 * to make a new vt (vtoff + vt) larger than
1019 * the vt in the last period for all children.
1021 vt = cl->cl_parent->cl_cvtmax;
1022 for (p = cl->cl_parent->cl_children; p != NULL;
1026 cl->cl_parent->cl_cvtmax = 0;
1027 cl->cl_parent->cl_cvtmin = 0;
1029 cl->cl_initvt = cl->cl_vt;
1031 /* update the virtual curve */
1032 vt = cl->cl_vt + cl->cl_vtoff;
1033 rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
1034 if (cl->cl_virtual.x == vt) {
1035 cl->cl_virtual.x -= cl->cl_vtoff;
1040 cl->cl_vtperiod++; /* increment vt period */
1041 cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
1042 if (cl->cl_parent->cl_nactive == 0)
1043 cl->cl_parentperiod++;
1048 if (cl->cl_usc != NULL) {
1049 /* class has upper limit curve */
1051 cur_time = read_machclk();
1053 /* update the ulimit curve */
1054 rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
1057 cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
1063 if (cl->cl_myf > cl->cl_cfmin)
1067 if (f != cl->cl_f) {
1069 update_cfmin(cl->cl_parent);
1075 update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
1077 u_int64_t f, myf_bound, delta;
1080 go_passive = qempty(cl->cl_q);
1082 for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
1084 cl->cl_total += len;
1086 if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
1089 if (go_passive && --cl->cl_nactive == 0)
1095 /* no more active child, going passive */
1097 /* update cvtmax of the parent class */
1098 if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
1099 cl->cl_parent->cl_cvtmax = cl->cl_vt;
1101 /* remove this class from the vt list */
1104 update_cfmin(cl->cl_parent);
1112 cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
1113 - cl->cl_vtoff + cl->cl_vtadj;
1116 * if vt of the class is smaller than cvtmin,
1117 * the class was skipped in the past due to non-fit.
1118 * if so, we need to adjust vtadj.
1120 if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
1121 cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
1122 cl->cl_vt = cl->cl_parent->cl_cvtmin;
1125 /* update the vt list */
1128 if (cl->cl_usc != NULL) {
1129 cl->cl_myf = cl->cl_myfadj
1130 + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
1133 * if myf lags behind by more than one clock tick
1134 * from the current time, adjust myfadj to prevent
1135 * a rate-limited class from going greedy.
1136 * in a steady state under rate-limiting, myf
1137 * fluctuates within one clock tick.
1139 myf_bound = cur_time - machclk_per_tick;
1140 if (cl->cl_myf < myf_bound) {
1141 delta = cur_time - cl->cl_myf;
1142 cl->cl_myfadj += delta;
1143 cl->cl_myf += delta;
1147 /* cl_f is max(cl_myf, cl_cfmin) */
1148 if (cl->cl_myf > cl->cl_cfmin)
1152 if (f != cl->cl_f) {
1154 update_cfmin(cl->cl_parent);
1160 update_cfmin(struct hfsc_class *cl)
1162 struct hfsc_class *p;
1165 if (TAILQ_EMPTY(cl->cl_actc)) {
1169 cfmin = HT_INFINITY;
1170 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1175 if (p->cl_f < cfmin)
1178 cl->cl_cfmin = cfmin;
1182 * TAILQ based ellist and actlist implementation
1183 * (ion wanted to make a calendar queue based implementation)
1186 * the eligible list holds backlogged classes, kept sorted by their eligible times.
1187 * there is one eligible list per interface.
1195 head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
1201 ellist_destroy(ellist_t *head)
1203 free(head, M_DEVBUF);
1207 ellist_insert(struct hfsc_class *cl)
1209 struct hfsc_if *hif = cl->cl_hif;
1210 struct hfsc_class *p;
1212 /* check the last entry first */
1213 if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
1214 p->cl_e <= cl->cl_e) {
1215 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
1219 TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
1220 if (cl->cl_e < p->cl_e) {
1221 TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1225 ASSERT(0); /* should not reach here */
1229 ellist_remove(struct hfsc_class *cl)
1231 struct hfsc_if *hif = cl->cl_hif;
1233 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1237 ellist_update(struct hfsc_class *cl)
1239 struct hfsc_if *hif = cl->cl_hif;
1240 struct hfsc_class *p, *last;
1243 * the eligible time of a class increases monotonically.
1244 * if the next entry has a larger eligible time, nothing to do.
1246 p = TAILQ_NEXT(cl, cl_ellist);
1247 if (p == NULL || cl->cl_e <= p->cl_e)
1250 /* check the last entry */
1251 last = TAILQ_LAST(hif->hif_eligible, _eligible);
1252 ASSERT(last != NULL);
1253 if (last->cl_e <= cl->cl_e) {
1254 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1255 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
1260 * the new position must be between the next entry
1261 * and the last entry
1263 while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
1264 if (cl->cl_e < p->cl_e) {
1265 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1266 TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1270 ASSERT(0); /* should not reach here */
1273 /* find the class with the minimum deadline among the eligible classes */
1275 ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
1277 struct hfsc_class *p, *cl = NULL;
1279 TAILQ_FOREACH(p, head, cl_ellist) {
1280 if (p->cl_e > cur_time)
1282 if (cl == NULL || p->cl_d < cl->cl_d)
1289 * the active children list holds backlogged child classes, kept sorted
1290 * by their virtual time.
1291 * each intermediate class has one active children list.
1298 head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
1304 actlist_destroy(actlist_t *head)
1306 free(head, M_DEVBUF);
1309 actlist_insert(struct hfsc_class *cl)
1311 struct hfsc_class *p;
1313 /* check the last entry first */
1314 if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
1315 || p->cl_vt <= cl->cl_vt) {
1316 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
1320 TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
1321 if (cl->cl_vt < p->cl_vt) {
1322 TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1326 ASSERT(0); /* should not reach here */
1330 actlist_remove(struct hfsc_class *cl)
1332 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1336 actlist_update(struct hfsc_class *cl)
1338 struct hfsc_class *p, *last;
1341 * the virtual time of a class increases monotonically during its
1342 * backlogged period.
1343 * if the next entry has a larger virtual time, nothing to do.
1345 p = TAILQ_NEXT(cl, cl_actlist);
1346 if (p == NULL || cl->cl_vt < p->cl_vt)
1349 /* check the last entry */
1350 last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
1351 ASSERT(last != NULL);
1352 if (last->cl_vt <= cl->cl_vt) {
1353 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1354 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
1359 * the new position must be between the next entry
1360 * and the last entry
1362 while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
1363 if (cl->cl_vt < p->cl_vt) {
1364 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1365 TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1369 ASSERT(0); /* should not reach here */
1372 static struct hfsc_class *
1373 actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
1375 struct hfsc_class *p;
1377 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1378 if (p->cl_f <= cur_time)
1385 * service curve support functions
1387 * external service curve parameters
1390 * internal service curve parameters
1391 * sm: (bytes/tsc_interval) << SM_SHIFT
1392 * ism: (tsc_count/byte) << ISM_SHIFT
1395 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
1396 * we should be able to handle 100K-1Gbps link speed with 200MHz-1GHz CPU
1397 * speed. SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
1398 * digits in decimal using the following table.
1400 * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps
1401 * ----------+-------------------------------------------------------
1402 * bytes/nsec 12.5e-6 125e-6 1250e-6 12500e-6 125000e-6
1403 * sm(500MHz) 25.0e-6 250e-6 2500e-6 25000e-6 250000e-6
1404 * sm(200MHz) 62.5e-6 625e-6 6250e-6 62500e-6 625000e-6
1406 * nsec/byte 80000 8000 800 80 8
1407 * ism(500MHz) 40000 4000 400 40 4
1408 * ism(200MHz) 16000 1600 160 16 1.6
1411 #define ISM_SHIFT 10
1413 #define SM_MASK ((1LL << SM_SHIFT) - 1)
1414 #define ISM_MASK ((1LL << ISM_SHIFT) - 1)
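/*
 * worked example (editorial addition; machclk_freq = 500 MHz and
 * SM_SHIFT = 24, the value used in the stock sources, are assumed):
 * a 10 Mbps slope moves 10000000/8 = 1250000 bytes/sec, i.e. the
 * 2500e-6 bytes per clock tick shown in the table above, so
 *     sm  = 0.0025 * 2^24 ~= 41943
 *     ism = 400    * 2^10  = 409600
 * both keep more than 3 effective decimal digits after scaling.
 */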
1416 static __inline u_int64_t
1417 seg_x2y(u_int64_t x, u_int64_t sm)
1423 * y = x * sm >> SM_SHIFT
1424 * but divide it for the upper and lower bits to avoid overflow
1426 y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
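/*
 * editorial note on the split above: writing x = xh * 2^SM_SHIFT + xl,
 * where xl = x & SM_MASK, gives
 *     (x * sm) >> SM_SHIFT == xh * sm + ((xl * sm) >> SM_SHIFT)
 * exactly, and neither partial product needs more than 64 bits for the
 * magnitudes involved, whereas the naive x * sm could overflow before
 * the final shift.
 */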
1430 static __inline u_int64_t
1431 seg_y2x(u_int64_t y, u_int64_t ism)
1437 else if (ism == HT_INFINITY)
1440 x = (y >> ISM_SHIFT) * ism
1441 + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
1446 static __inline u_int64_t
1451 sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
1455 static __inline u_int64_t
1463 ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
1467 static __inline u_int64_t
1472 dx = ((u_int64_t)d * machclk_freq) / 1000;
1481 m = (sm * 8 * machclk_freq) >> SM_SHIFT;
1490 d = dx * 1000 / machclk_freq;
1495 sc2isc(struct service_curve *sc, struct internal_sc *isc)
1497 isc->sm1 = m2sm(sc->m1);
1498 isc->ism1 = m2ism(sc->m1);
1499 isc->dx = d2dx(sc->d);
1500 isc->dy = seg_x2y(isc->dx, isc->sm1);
1501 isc->sm2 = m2sm(sc->m2);
1502 isc->ism2 = m2ism(sc->m2);
1506 * initialize the runtime service curve with the given internal
1507 * service curve starting at (x, y).
1510 rtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
1515 rtsc->sm1 = isc->sm1;
1516 rtsc->ism1 = isc->ism1;
1519 rtsc->sm2 = isc->sm2;
1520 rtsc->ism2 = isc->ism2;
1524 * calculate the x-projection (the time coordinate) of the runtime
1525 * service curve for the given y-projection (amount of service) value
1528 rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
1534 else if (y <= rtsc->y + rtsc->dy) {
1535 /* x belongs to the 1st segment */
1537 x = rtsc->x + rtsc->dx;
1539 x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
1541 /* x belongs to the 2nd segment */
1542 x = rtsc->x + rtsc->dx
1543 + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
1549 rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
1555 else if (x <= rtsc->x + rtsc->dx)
1556 /* y belongs to the 1st segment */
1557 y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
1559 /* y belongs to the 2nd segment */
1560 y = rtsc->y + rtsc->dy
1561 + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
1566 * update the runtime service curve by taking the minimum of the current
1567 * runtime service curve and the service curve starting at (x, y).
1570 rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
1573 u_int64_t y1, y2, dx, dy;
1575 if (isc->sm1 <= isc->sm2) {
1576 /* service curve is convex */
1577 y1 = rtsc_x2y(rtsc, x);
1579 /* the current rtsc is smaller */
1587 * service curve is concave
1588 * compute the two y values of the current rtsc
1592 y1 = rtsc_x2y(rtsc, x);
1594 /* rtsc is below isc, no change to rtsc */
1598 y2 = rtsc_x2y(rtsc, x + isc->dx);
1599 if (y2 >= y + isc->dy) {
1600 /* rtsc is above isc, replace rtsc by isc */
1609 * the two curves intersect
1610 * compute the offsets (dx, dy) using the reverse
1611 * function of seg_x2y()
1612 * seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
1614 dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
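/*
 * editorial derivation of the line above: expanding
 *     seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 * gives
 *     (dx * sm1) >> SM_SHIFT == ((dx * sm2) >> SM_SHIFT) + (y1 - y)
 *  => dx * (sm1 - sm2) == (y1 - y) << SM_SHIFT
 *  => dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 * which is well defined here because this branch handles the concave
 * case, where sm1 > sm2.
 */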
1616 * check if (x, y1) belongs to the 1st segment of rtsc.
1617 * if so, add the offset.
1619 if (rtsc->x + rtsc->dx > x)
1620 dx += rtsc->x + rtsc->dx - x;
1621 dy = seg_x2y(dx, isc->sm1);
1631 get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
1633 sp->class_id = cl->cl_id;
1634 sp->class_handle = cl->cl_handle;
1636 if (cl->cl_rsc != NULL) {
1637 sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
1638 sp->rsc.d = dx2d(cl->cl_rsc->dx);
1639 sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
1645 if (cl->cl_fsc != NULL) {
1646 sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
1647 sp->fsc.d = dx2d(cl->cl_fsc->dx);
1648 sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
1654 if (cl->cl_usc != NULL) {
1655 sp->usc.m1 = sm2m(cl->cl_usc->sm1);
1656 sp->usc.d = dx2d(cl->cl_usc->dx);
1657 sp->usc.m2 = sm2m(cl->cl_usc->sm2);
1664 sp->total = cl->cl_total;
1665 sp->cumul = cl->cl_cumul;
1672 sp->initvt = cl->cl_initvt;
1673 sp->vtperiod = cl->cl_vtperiod;
1674 sp->parentperiod = cl->cl_parentperiod;
1675 sp->nactive = cl->cl_nactive;
1676 sp->vtoff = cl->cl_vtoff;
1677 sp->cvtmax = cl->cl_cvtmax;
1678 sp->myf = cl->cl_myf;
1679 sp->cfmin = cl->cl_cfmin;
1680 sp->cvtmin = cl->cl_cvtmin;
1681 sp->myfadj = cl->cl_myfadj;
1682 sp->vtadj = cl->cl_vtadj;
1684 sp->cur_time = read_machclk();
1685 sp->machclk_freq = machclk_freq;
1687 sp->qlength = qlen(cl->cl_q);
1688 sp->qlimit = qlimit(cl->cl_q);
1689 sp->xmit_cnt = cl->cl_stats.xmit_cnt;
1690 sp->drop_cnt = cl->cl_stats.drop_cnt;
1691 sp->period = cl->cl_stats.period;
1693 sp->qtype = qtype(cl->cl_q);
1695 if (q_is_red(cl->cl_q))
1696 red_getstats(cl->cl_red, &sp->red[0]);
1699 if (q_is_rio(cl->cl_q))
1700 rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
1704 /* convert a class handle to the corresponding class pointer */
1705 static struct hfsc_class *
1706 clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
1709 struct hfsc_class *cl;
1714 * first, try optimistically the slot matching the lower bits of
1715 * the handle. if it fails, do the linear table search.
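 *
 * (editorial note: this mirrors the slot-assignment policy in
 * hfsc_class_create() -- a class goes into slot qid % HFSC_MAX_CLASSES
 * when that slot is free and into the first free slot otherwise -- so
 * the fast path hits whenever the handle did not collide at creation
 * time, and the linear scan only runs on collisions or for unknown
 * handles.)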
1717 i = chandle % HFSC_MAX_CLASSES;
1718 if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
1720 for (i = 0; i < HFSC_MAX_CLASSES; i++)
1721 if ((cl = hif->hif_class_tbl[i]) != NULL &&
1722 cl->cl_handle == chandle)
1728 static struct hfsc_if *
1729 hfsc_attach(ifq, bandwidth)
1733 struct hfsc_if *hif;
1735 hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
1738 bzero(hif, sizeof(struct hfsc_if));
1740 hif->hif_eligible = ellist_alloc();
1741 if (hif->hif_eligible == NULL) {
1742 free(hif, M_DEVBUF);
1748 /* add this state to the hfsc list */
1749 hif->hif_next = hif_list;
1757 struct hfsc_if *hif;
1759 (void)hfsc_clear_interface(hif);
1760 (void)hfsc_class_destroy(hif->hif_rootclass);
1762 /* remove this interface from the hif list */
1763 if (hif_list == hif)
1764 hif_list = hif->hif_next;
1768 for (h = hif_list; h != NULL; h = h->hif_next)
1769 if (h->hif_next == hif) {
1770 h->hif_next = hif->hif_next;
1776 ellist_destroy(hif->hif_eligible);
1778 free(hif, M_DEVBUF);
1784 hfsc_class_modify(cl, rsc, fsc, usc)
1785 struct hfsc_class *cl;
1786 struct service_curve *rsc, *fsc, *usc;
1788 struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
1792 rsc_tmp = fsc_tmp = usc_tmp = NULL;
1793 if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
1794 cl->cl_rsc == NULL) {
1795 rsc_tmp = malloc(sizeof(struct internal_sc),
1796 M_DEVBUF, M_WAITOK);
1797 if (rsc_tmp == NULL)
1800 if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
1801 cl->cl_fsc == NULL) {
1802 fsc_tmp = malloc(sizeof(struct internal_sc),
1803 M_DEVBUF, M_WAITOK);
1804 if (fsc_tmp == NULL) {
1809 if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
1810 cl->cl_usc == NULL) {
1811 usc_tmp = malloc(sizeof(struct internal_sc),
1812 M_DEVBUF, M_WAITOK);
1813 if (usc_tmp == NULL) {
1820 cur_time = read_machclk();
1826 IFQ_LOCK(cl->cl_hif->hif_ifq);
1829 if (rsc->m1 == 0 && rsc->m2 == 0) {
1830 if (cl->cl_rsc != NULL) {
1831 if (!qempty(cl->cl_q))
1833 free(cl->cl_rsc, M_DEVBUF);
1837 if (cl->cl_rsc == NULL)
1838 cl->cl_rsc = rsc_tmp;
1839 sc2isc(rsc, cl->cl_rsc);
1840 rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
1842 cl->cl_eligible = cl->cl_deadline;
1843 if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
1844 cl->cl_eligible.dx = 0;
1845 cl->cl_eligible.dy = 0;
1851 if (fsc->m1 == 0 && fsc->m2 == 0) {
1852 if (cl->cl_fsc != NULL) {
1853 if (!qempty(cl->cl_q))
1855 free(cl->cl_fsc, M_DEVBUF);
1859 if (cl->cl_fsc == NULL)
1860 cl->cl_fsc = fsc_tmp;
1861 sc2isc(fsc, cl->cl_fsc);
1862 rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
1868 if (usc->m1 == 0 && usc->m2 == 0) {
1869 if (cl->cl_usc != NULL) {
1870 free(cl->cl_usc, M_DEVBUF);
1875 if (cl->cl_usc == NULL)
1876 cl->cl_usc = usc_tmp;
1877 sc2isc(usc, cl->cl_usc);
1878 rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
1883 if (!qempty(cl->cl_q)) {
1884 if (cl->cl_rsc != NULL)
1885 update_ed(cl, m_pktlen(qhead(cl->cl_q)));
1886 if (cl->cl_fsc != NULL)
1887 update_vf(cl, 0, cur_time);
1888 /* is this enough? */
1891 IFQ_UNLOCK(cl->cl_hif->hif_ifq);
1898 * hfsc device interface
1901 hfscopen(dev, flag, fmt, p)
1904 #if (__FreeBSD_version > 500000)
1910 if (machclk_freq == 0)
1913 if (machclk_freq == 0) {
1914 printf("hfsc: no cpu clock available!\n");
1918 /* everything will be done when the queueing scheme is attached. */
1923 hfscclose(dev, flag, fmt, p)
1926 #if (__FreeBSD_version > 500000)
1932 struct hfsc_if *hif;
1935 while ((hif = hif_list) != NULL) {
1937 if (ALTQ_IS_ENABLED(hif->hif_ifq))
1938 altq_disable(hif->hif_ifq);
1940 err = altq_detach(hif->hif_ifq);
1942 err = hfsc_detach(hif);
1943 if (err != 0 && error == 0)
1951 hfscioctl(dev, cmd, addr, flag, p)
1956 #if (__FreeBSD_version > 500000)
1962 struct hfsc_if *hif;
1963 struct hfsc_interface *ifacep;
1966 /* check super-user privilege */
1971 #if (__FreeBSD_version > 700000)
1972 if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
1974 #elif (__FreeBSD_version > 400000)
1975 if ((error = suser(p)) != 0)
1978 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
1986 case HFSC_IF_ATTACH:
1987 error = hfsccmd_if_attach((struct hfsc_attach *)addr);
1990 case HFSC_IF_DETACH:
1991 error = hfsccmd_if_detach((struct hfsc_interface *)addr);
1996 case HFSC_CLEAR_HIERARCHY:
1997 ifacep = (struct hfsc_interface *)addr;
1998 if ((hif = altq_lookup(ifacep->hfsc_ifname,
1999 ALTQT_HFSC)) == NULL) {
2007 if (hif->hif_defaultclass == NULL) {
2009 printf("hfsc: no default class\n");
2014 error = altq_enable(hif->hif_ifq);
2018 error = altq_disable(hif->hif_ifq);
2021 case HFSC_CLEAR_HIERARCHY:
2022 hfsc_clear_interface(hif);
2027 case HFSC_ADD_CLASS:
2028 error = hfsccmd_add_class((struct hfsc_add_class *)addr);
2031 case HFSC_DEL_CLASS:
2032 error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
2035 case HFSC_MOD_CLASS:
2036 error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
2039 case HFSC_ADD_FILTER:
2040 error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
2043 case HFSC_DEL_FILTER:
2044 error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
2048 error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
2059 hfsccmd_if_attach(ap)
2060 struct hfsc_attach *ap;
2062 struct hfsc_if *hif;
2066 if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
2069 if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
2073 * attach HFSC to this ifnet structure.
2075 if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
2076 hfsc_enqueue, hfsc_dequeue, hfsc_request,
2077 &hif->hif_classifier, acc_classify)) != 0)
2078 (void)hfsc_detach(hif);
2084 hfsccmd_if_detach(ap)
2085 struct hfsc_interface *ap;
2087 struct hfsc_if *hif;
2090 if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
2093 if (ALTQ_IS_ENABLED(hif->hif_ifq))
2094 altq_disable(hif->hif_ifq);
2096 if ((error = altq_detach(hif->hif_ifq)))
2099 return hfsc_detach(hif);
2103 hfsccmd_add_class(ap)
2104 struct hfsc_add_class *ap;
2106 struct hfsc_if *hif;
2107 struct hfsc_class *cl, *parent;
2110 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2113 if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
2114 hif->hif_rootclass == NULL)
2116 else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
2119 /* assign a class handle (use a free slot number for now) */
2120 for (i = 1; i < HFSC_MAX_CLASSES; i++)
2121 if (hif->hif_class_tbl[i] == NULL)
2123 if (i == HFSC_MAX_CLASSES)
2126 if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
2127 parent, ap->qlimit, ap->flags, i)) == NULL)
2130 /* return a class handle to the user */
2131 ap->class_handle = i;
2137 hfsccmd_delete_class(ap)
2138 struct hfsc_delete_class *ap;
2140 struct hfsc_if *hif;
2141 struct hfsc_class *cl;
2143 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2146 if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2149 return hfsc_class_destroy(cl);
2153 hfsccmd_modify_class(ap)
2154 struct hfsc_modify_class *ap;
2156 struct hfsc_if *hif;
2157 struct hfsc_class *cl;
2158 struct service_curve *rsc = NULL;
2159 struct service_curve *fsc = NULL;
2160 struct service_curve *usc = NULL;
2162 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2165 if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2168 if (ap->sctype & HFSC_REALTIMESC)
2169 rsc = &ap->service_curve;
2170 if (ap->sctype & HFSC_LINKSHARINGSC)
2171 fsc = &ap->service_curve;
2172 if (ap->sctype & HFSC_UPPERLIMITSC)
2173 usc = &ap->service_curve;
2175 return hfsc_class_modify(cl, rsc, fsc, usc);
2179 hfsccmd_add_filter(ap)
2180 struct hfsc_add_filter *ap;
2182 struct hfsc_if *hif;
2183 struct hfsc_class *cl;
2185 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2188 if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2191 if (is_a_parent_class(cl)) {
2193 printf("hfsccmd_add_filter: not a leaf class!\n");
2198 return acc_add_filter(&hif->hif_classifier, &ap->filter,
2199 cl, &ap->filter_handle);
2203 hfsccmd_delete_filter(ap)
2204 struct hfsc_delete_filter *ap;
2206 struct hfsc_if *hif;
2208 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2211 return acc_delete_filter(&hif->hif_classifier,
2216 hfsccmd_class_stats(ap)
2217 struct hfsc_class_stats *ap;
2219 struct hfsc_if *hif;
2220 struct hfsc_class *cl;
2221 struct hfsc_classstats stats, *usp;
2222 int n, nclasses, error;
2224 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2227 ap->cur_time = read_machclk();
2228 ap->machclk_freq = machclk_freq;
2229 ap->hif_classes = hif->hif_classes;
2230 ap->hif_packets = hif->hif_packets;
2232 /* skip the first N classes in the tree */
2233 nclasses = ap->nskip;
2234 for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
2235 cl = hfsc_nextclass(cl), n++)
2240 /* then, read the next N classes in the tree */
2241 nclasses = ap->nclasses;
2243 for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
2245 get_class_stats(&stats, cl);
2247 if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
2248 sizeof(stats))) != 0)
2259 static struct altqsw hfsc_sw =
2260 {"hfsc", hfscopen, hfscclose, hfscioctl};
2262 ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
2263 MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
2264 MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);
2266 #endif /* KLD_MODULE */
2267 #endif /* ALTQ3_COMPAT */
2269 #endif /* ALTQ_HFSC */