2 /* $KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $ */
5 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
7 * Permission to use, copy, modify, and distribute this software and
8 * its documentation is hereby granted (including for commercial or
9 * for-profit use), provided that both the copyright notice and this
10 * permission notice appear in all copies of the software, derivative
11 * works, or modified versions, and any portions thereof.
13 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
14 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
15 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
25 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28 * Carnegie Mellon encourages (but does not require) users of this
29 * software to return any improvements or extensions that they make,
30 * and to grant Carnegie Mellon the rights to redistribute these
31 * changes without encumbrance.
34 * H-FSC is described in Proceedings of SIGCOMM'97,
35 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
36 * Real-Time and Priority Service"
37 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
39 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
40 * when a class has an upperlimit, the fit-time is computed from the
41 * upperlimit service curve. the link-sharing scheduler does not schedule
42 * a class whose fit-time exceeds the current time.
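 * (a class's fit-time is the earliest time at which it may again be
 * served without running ahead of its upperlimit service curve.)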
45 #if defined(__FreeBSD__) || defined(__NetBSD__)
47 #if (__FreeBSD__ != 2)
50 #include "opt_inet6.h"
53 #endif /* __FreeBSD__ || __NetBSD__ */
55 #ifdef ALTQ_HFSC /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */
57 #include <sys/param.h>
58 #include <sys/malloc.h>
60 #include <sys/socket.h>
61 #include <sys/systm.h>
62 #include <sys/errno.h>
63 #include <sys/queue.h>
64 #if 1 /* ALTQ3_COMPAT */
65 #include <sys/sockio.h>
67 #include <sys/kernel.h>
68 #endif /* ALTQ3_COMPAT */
71 #include <netinet/in.h>
73 #include <net/pfvar.h>
74 #include <altq/altq.h>
75 #include <altq/altq_hfsc.h>
77 #include <altq/altq_conf.h>
83 static int hfsc_clear_interface(struct hfsc_if *);
84 static int hfsc_request(struct ifaltq *, int, void *);
85 static void hfsc_purge(struct hfsc_if *);
86 static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
87 struct service_curve *, struct service_curve *, struct service_curve *,
88 struct hfsc_class *, int, int, int);
89 static int hfsc_class_destroy(struct hfsc_class *);
90 static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
91 static int hfsc_enqueue(struct ifaltq *, struct mbuf *,
92 struct altq_pktattr *);
93 static struct mbuf *hfsc_dequeue(struct ifaltq *, int);
95 static int hfsc_addq(struct hfsc_class *, struct mbuf *);
96 static struct mbuf *hfsc_getq(struct hfsc_class *);
97 static struct mbuf *hfsc_pollq(struct hfsc_class *);
98 static void hfsc_purgeq(struct hfsc_class *);
100 static void update_cfmin(struct hfsc_class *);
101 static void set_active(struct hfsc_class *, int);
102 static void set_passive(struct hfsc_class *);
104 static void init_ed(struct hfsc_class *, int);
105 static void update_ed(struct hfsc_class *, int);
106 static void update_d(struct hfsc_class *, int);
107 static void init_vf(struct hfsc_class *, int);
108 static void update_vf(struct hfsc_class *, int, u_int64_t);
109 static ellist_t *ellist_alloc(void);
110 static void ellist_destroy(ellist_t *);
111 static void ellist_insert(struct hfsc_class *);
112 static void ellist_remove(struct hfsc_class *);
113 static void ellist_update(struct hfsc_class *);
114 struct hfsc_class *ellist_get_mindl(ellist_t *, u_int64_t);
115 static actlist_t *actlist_alloc(void);
116 static void actlist_destroy(actlist_t *);
117 static void actlist_insert(struct hfsc_class *);
118 static void actlist_remove(struct hfsc_class *);
119 static void actlist_update(struct hfsc_class *);
121 static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
124 static __inline u_int64_t seg_x2y(u_int64_t, u_int64_t);
125 static __inline u_int64_t seg_y2x(u_int64_t, u_int64_t);
126 static __inline u_int64_t m2sm(u_int);
127 static __inline u_int64_t m2ism(u_int);
128 static __inline u_int64_t d2dx(u_int);
129 static u_int sm2m(u_int64_t);
130 static u_int dx2d(u_int64_t);
132 static void sc2isc(struct service_curve *, struct internal_sc *);
133 static void rtsc_init(struct runtime_sc *, struct internal_sc *,
134 u_int64_t, u_int64_t);
135 static u_int64_t rtsc_y2x(struct runtime_sc *, u_int64_t);
136 static u_int64_t rtsc_x2y(struct runtime_sc *, u_int64_t);
137 static void rtsc_min(struct runtime_sc *, struct internal_sc *,
138 u_int64_t, u_int64_t);
140 static void get_class_stats(struct hfsc_classstats *,
141 struct hfsc_class *);
142 static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);
146 static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
147 static int hfsc_detach(struct hfsc_if *);
148 static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
149 struct service_curve *, struct service_curve *);
151 static int hfsccmd_if_attach(struct hfsc_attach *);
152 static int hfsccmd_if_detach(struct hfsc_interface *);
153 static int hfsccmd_add_class(struct hfsc_add_class *);
154 static int hfsccmd_delete_class(struct hfsc_delete_class *);
155 static int hfsccmd_modify_class(struct hfsc_modify_class *);
156 static int hfsccmd_add_filter(struct hfsc_add_filter *);
157 static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
158 static int hfsccmd_class_stats(struct hfsc_class_stats *);
161 #endif /* ALTQ3_COMPAT */
166 #define is_a_parent_class(cl) ((cl)->cl_children != NULL)
168 #define HT_INFINITY 0xffffffffffffffffLL /* infinite time value */
171 /* hif_list keeps all hfsc_if's allocated. */
172 static struct hfsc_if *hif_list = NULL;
173 #endif /* ALTQ3_COMPAT */
176 hfsc_pfattach(struct pf_altq *a)
181 if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
188 error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
189 hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
195 hfsc_add_altq(struct pf_altq *a)
200 if ((ifp = ifunit(a->ifname)) == NULL)
202 if (!ALTQ_IS_READY(&ifp->if_snd))
205 MALLOC(hif, struct hfsc_if *, sizeof(struct hfsc_if),
209 bzero(hif, sizeof(struct hfsc_if));
211 hif->hif_eligible = ellist_alloc();
212 if (hif->hif_eligible == NULL) {
217 hif->hif_ifq = &ifp->if_snd;
219 /* keep the state in pf_altq */
226 hfsc_remove_altq(struct pf_altq *a)
230 if ((hif = a->altq_disc) == NULL)
234 (void)hfsc_clear_interface(hif);
235 (void)hfsc_class_destroy(hif->hif_rootclass);
237 ellist_destroy(hif->hif_eligible);
245 hfsc_add_queue(struct pf_altq *a)
248 struct hfsc_class *cl, *parent;
249 struct hfsc_opts *opts;
250 struct service_curve rtsc, lssc, ulsc;
252 if ((hif = a->altq_disc) == NULL)
255 opts = &a->pq_u.hfsc_opts;
257 if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
258 hif->hif_rootclass == NULL)
260 else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
266 if (clh_to_clp(hif, a->qid) != NULL)
269 rtsc.m1 = opts->rtsc_m1;
270 rtsc.d = opts->rtsc_d;
271 rtsc.m2 = opts->rtsc_m2;
272 lssc.m1 = opts->lssc_m1;
273 lssc.d = opts->lssc_d;
274 lssc.m2 = opts->lssc_m2;
275 ulsc.m1 = opts->ulsc_m1;
276 ulsc.d = opts->ulsc_d;
277 ulsc.m2 = opts->ulsc_m2;
279 cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
280 parent, a->qlimit, opts->flags, a->qid);
288 hfsc_remove_queue(struct pf_altq *a)
291 struct hfsc_class *cl;
293 if ((hif = a->altq_disc) == NULL)
296 if ((cl = clh_to_clp(hif, a->qid)) == NULL)
299 return (hfsc_class_destroy(cl));
303 hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
306 struct hfsc_class *cl;
307 struct hfsc_classstats stats;
310 if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
313 if ((cl = clh_to_clp(hif, a->qid)) == NULL)
316 if (*nbytes < sizeof(stats))
319 get_class_stats(&stats, cl);
321 if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
323 *nbytes = sizeof(stats);
328 * bring the interface back to the initial state by discarding
329 * all the filters and classes except the root class.
332 hfsc_clear_interface(struct hfsc_if *hif)
334 struct hfsc_class *cl;
337 /* free the filters for this interface */
338 acc_discard_filters(&hif->hif_classifier, NULL, 1);
341 /* clear out the classes */
342 while (hif->hif_rootclass != NULL &&
343 (cl = hif->hif_rootclass->cl_children) != NULL) {
345 * remove the first leaf class found in the hierarchy
348 for (; cl != NULL; cl = hfsc_nextclass(cl)) {
349 if (!is_a_parent_class(cl)) {
350 (void)hfsc_class_destroy(cl);
360 hfsc_request(struct ifaltq *ifq, int req, void *arg)
362 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
364 IFQ_LOCK_ASSERT(ifq);
374 /* discard all the queued packets on the interface */
376 hfsc_purge(struct hfsc_if *hif)
378 struct hfsc_class *cl;
380 for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
381 if (!qempty(cl->cl_q))
383 if (ALTQ_IS_ENABLED(hif->hif_ifq))
384 hif->hif_ifq->ifq_len = 0;
388 hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
389 struct service_curve *fsc, struct service_curve *usc,
390 struct hfsc_class *parent, int qlimit, int flags, int qid)
392 struct hfsc_class *cl, *p;
395 if (hif->hif_classes >= HFSC_MAX_CLASSES)
399 if (flags & HFCF_RED) {
401 printf("hfsc_class_create: RED not configured for HFSC!\n");
407 MALLOC(cl, struct hfsc_class *, sizeof(struct hfsc_class),
411 bzero(cl, sizeof(struct hfsc_class));
413 MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
415 if (cl->cl_q == NULL)
417 bzero(cl->cl_q, sizeof(class_queue_t));
419 cl->cl_actc = actlist_alloc();
420 if (cl->cl_actc == NULL)
424 qlimit = 50; /* use default */
425 qlimit(cl->cl_q) = qlimit;
426 qtype(cl->cl_q) = Q_DROPTAIL;
428 cl->cl_flags = flags;
430 if (flags & (HFCF_RED|HFCF_RIO)) {
431 int red_flags, red_pkttime;
435 if (rsc != NULL && rsc->m2 > m2)
437 if (fsc != NULL && fsc->m2 > m2)
439 if (usc != NULL && usc->m2 > m2)
443 if (flags & HFCF_ECN)
444 red_flags |= REDF_ECN;
446 if (flags & HFCF_CLEARDSCP)
447 red_flags |= RIOF_CLEARDSCP;
450 red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
452 red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
453 * 1000 * 1000 * 1000 / (m2 / 8);
454 if (flags & HFCF_RED) {
455 cl->cl_red = red_alloc(0, 0,
456 qlimit(cl->cl_q) * 10/100,
457 qlimit(cl->cl_q) * 30/100,
458 red_flags, red_pkttime);
459 if (cl->cl_red != NULL)
460 qtype(cl->cl_q) = Q_RED;
464 cl->cl_red = (red_t *)rio_alloc(0, NULL,
465 red_flags, red_pkttime);
466 if (cl->cl_red != NULL)
467 qtype(cl->cl_q) = Q_RIO;
471 #endif /* ALTQ_RED */
473 if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
474 MALLOC(cl->cl_rsc, struct internal_sc *,
475 sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
476 if (cl->cl_rsc == NULL)
478 sc2isc(rsc, cl->cl_rsc);
479 rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
480 rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
482 if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
483 MALLOC(cl->cl_fsc, struct internal_sc *,
484 sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
485 if (cl->cl_fsc == NULL)
487 sc2isc(fsc, cl->cl_fsc);
488 rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
490 if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
491 MALLOC(cl->cl_usc, struct internal_sc *,
492 sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
493 if (cl->cl_usc == NULL)
495 sc2isc(usc, cl->cl_usc);
496 rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
499 cl->cl_id = hif->hif_classid++;
502 cl->cl_parent = parent;
509 IFQ_LOCK(hif->hif_ifq);
513 * find a free slot in the class table. if the slot matching
514 * the lower bits of qid is free, use this slot. otherwise,
515 * use the first free slot.
517 i = qid % HFSC_MAX_CLASSES;
518 if (hif->hif_class_tbl[i] == NULL)
519 hif->hif_class_tbl[i] = cl;
521 for (i = 0; i < HFSC_MAX_CLASSES; i++)
522 if (hif->hif_class_tbl[i] == NULL) {
523 hif->hif_class_tbl[i] = cl;
526 if (i == HFSC_MAX_CLASSES) {
527 IFQ_UNLOCK(hif->hif_ifq);
533 if (flags & HFCF_DEFAULTCLASS)
534 hif->hif_defaultclass = cl;
536 if (parent == NULL) {
537 /* this is the root class */
538 hif->hif_rootclass = cl;
540 /* add this class to the children list of the parent */
541 if ((p = parent->cl_children) == NULL)
542 parent->cl_children = cl;
544 while (p->cl_siblings != NULL)
549 IFQ_UNLOCK(hif->hif_ifq);
555 if (cl->cl_actc != NULL)
556 actlist_destroy(cl->cl_actc);
557 if (cl->cl_red != NULL) {
559 if (q_is_rio(cl->cl_q))
560 rio_destroy((rio_t *)cl->cl_red);
563 if (q_is_red(cl->cl_q))
564 red_destroy(cl->cl_red);
567 if (cl->cl_fsc != NULL)
568 FREE(cl->cl_fsc, M_DEVBUF);
569 if (cl->cl_rsc != NULL)
570 FREE(cl->cl_rsc, M_DEVBUF);
571 if (cl->cl_usc != NULL)
572 FREE(cl->cl_usc, M_DEVBUF);
573 if (cl->cl_q != NULL)
574 FREE(cl->cl_q, M_DEVBUF);
580 hfsc_class_destroy(struct hfsc_class *cl)
587 if (is_a_parent_class(cl))
595 IFQ_LOCK(cl->cl_hif->hif_ifq);
598 /* delete filters referencing this class */
599 acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
600 #endif /* ALTQ3_COMPAT */
602 if (!qempty(cl->cl_q))
605 if (cl->cl_parent == NULL) {
606 /* this is the root class */
608 struct hfsc_class *p = cl->cl_parent->cl_children;
611 cl->cl_parent->cl_children = cl->cl_siblings;
613 if (p->cl_siblings == cl) {
614 p->cl_siblings = cl->cl_siblings;
617 } while ((p = p->cl_siblings) != NULL);
621 for (i = 0; i < HFSC_MAX_CLASSES; i++)
622 if (cl->cl_hif->hif_class_tbl[i] == cl) {
623 cl->cl_hif->hif_class_tbl[i] = NULL;
627 cl->cl_hif->hif_classes--;
628 IFQ_UNLOCK(cl->cl_hif->hif_ifq);
631 actlist_destroy(cl->cl_actc);
633 if (cl->cl_red != NULL) {
635 if (q_is_rio(cl->cl_q))
636 rio_destroy((rio_t *)cl->cl_red);
639 if (q_is_red(cl->cl_q))
640 red_destroy(cl->cl_red);
644 IFQ_LOCK(cl->cl_hif->hif_ifq);
645 if (cl == cl->cl_hif->hif_rootclass)
646 cl->cl_hif->hif_rootclass = NULL;
647 if (cl == cl->cl_hif->hif_defaultclass)
648 cl->cl_hif->hif_defaultclass = NULL;
649 IFQ_UNLOCK(cl->cl_hif->hif_ifq);
651 if (cl->cl_usc != NULL)
652 FREE(cl->cl_usc, M_DEVBUF);
653 if (cl->cl_fsc != NULL)
654 FREE(cl->cl_fsc, M_DEVBUF);
655 if (cl->cl_rsc != NULL)
656 FREE(cl->cl_rsc, M_DEVBUF);
657 FREE(cl->cl_q, M_DEVBUF);
664 * hfsc_nextclass returns the next class in the tree.
666 * for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
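 * visits every class exactly once, in preorder: descend to a child
 * first, then move to a sibling, otherwise climb until an ancestor
 * with an unvisited sibling is found.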
669 static struct hfsc_class *
670 hfsc_nextclass(struct hfsc_class *cl)
672 if (cl->cl_children != NULL)
673 cl = cl->cl_children;
674 else if (cl->cl_siblings != NULL)
675 cl = cl->cl_siblings;
677 while ((cl = cl->cl_parent) != NULL)
678 if (cl->cl_siblings) {
679 cl = cl->cl_siblings;
688 * hfsc_enqueue is an enqueue function to be registered to
689 * (*altq_enqueue) in struct ifaltq.
692 hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
694 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
695 struct hfsc_class *cl;
699 IFQ_LOCK_ASSERT(ifq);
701 /* grab class set by classifier */
702 if ((m->m_flags & M_PKTHDR) == 0) {
703 /* should not happen */
704 #if defined(__NetBSD__) || defined(__OpenBSD__)\
705 || (defined(__FreeBSD__) && __FreeBSD_version >= 501113)
706 printf("altq: packet for %s does not have pkthdr\n",
707 ifq->altq_ifp->if_xname);
709 printf("altq: packet for %s%d does not have pkthdr\n",
710 ifq->altq_ifp->if_name, ifq->altq_ifp->if_unit);
716 if ((t = m_tag_find(m, PACKET_TAG_PF_QID, NULL)) != NULL)
717 cl = clh_to_clp(hif, ((struct altq_tag *)(t+1))->qid);
719 else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
720 cl = pktattr->pattr_class;
722 if (cl == NULL || is_a_parent_class(cl)) {
723 cl = hif->hif_defaultclass;
731 cl->cl_pktattr = pktattr; /* save proto hdr used by ECN */
734 cl->cl_pktattr = NULL;
736 if (hfsc_addq(cl, m) != 0) {
737 /* drop occurred. mbuf was freed in hfsc_addq. */
738 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
742 cl->cl_hif->hif_packets++;
744 /* successfully queued. */
745 if (qlen(cl->cl_q) == 1)
746 set_active(cl, m_pktlen(m));
752 * hfsc_dequeue is a dequeue function to be registered to
753 * (*altq_dequeue) in struct ifaltq.
755 * note: ALTDQ_POLL returns the next packet without removing the packet
756 * from the queue. ALTDQ_REMOVE is a normal dequeue operation.
757 * ALTDQ_REMOVE must return the same packet if called immediately
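 * after ALTDQ_POLL; the class chosen by a poll is cached in
 * hif_pollcache so that the immediately following ALTDQ_REMOVE serves
 * the head packet of that same class.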
761 hfsc_dequeue(struct ifaltq *ifq, int op)
763 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
764 struct hfsc_class *cl;
770 IFQ_LOCK_ASSERT(ifq);
772 if (hif->hif_packets == 0)
773 /* no packet in the tree */
776 cur_time = read_machclk();
778 if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
780 cl = hif->hif_pollcache;
781 hif->hif_pollcache = NULL;
782 /* check if the class was scheduled by real-time criteria */
783 if (cl->cl_rsc != NULL)
784 realtime = (cl->cl_e <= cur_time);
787 * if there are eligible classes, use real-time criteria.
788 * find the class with the minimum deadline among
789 * the eligible classes.
791 if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
799 * use link-sharing criteria
800 * get the class with the minimum vt in the hierarchy
802 cl = hif->hif_rootclass;
803 while (is_a_parent_class(cl)) {
805 cl = actlist_firstfit(cl, cur_time);
809 printf("%d fit but none found\n",fits);
814 * update parent's cl_cvtmin.
815 * don't update if the new vt is smaller.
817 if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
818 cl->cl_parent->cl_cvtmin = cl->cl_vt;
825 if (op == ALTDQ_POLL) {
826 hif->hif_pollcache = cl;
834 panic("hfsc_dequeue:");
836 cl->cl_hif->hif_packets--;
838 PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);
840 update_vf(cl, len, cur_time);
844 if (!qempty(cl->cl_q)) {
845 if (cl->cl_rsc != NULL) {
847 next_len = m_pktlen(qhead(cl->cl_q));
850 update_ed(cl, next_len);
852 update_d(cl, next_len);
855 /* the class becomes passive */
863 hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
867 if (q_is_rio(cl->cl_q))
868 return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
872 if (q_is_red(cl->cl_q))
873 return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
875 if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
880 if (cl->cl_flags & HFCF_CLEARDSCP)
881 write_dsfield(m, cl->cl_pktattr, 0);
889 hfsc_getq(struct hfsc_class *cl)
892 if (q_is_rio(cl->cl_q))
893 return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
896 if (q_is_red(cl->cl_q))
897 return red_getq(cl->cl_red, cl->cl_q);
899 return _getq(cl->cl_q);
903 hfsc_pollq(struct hfsc_class *cl)
905 return qhead(cl->cl_q);
909 hfsc_purgeq(struct hfsc_class *cl)
913 if (qempty(cl->cl_q))
916 while ((m = _getq(cl->cl_q)) != NULL) {
917 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
919 cl->cl_hif->hif_packets--;
920 IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
922 ASSERT(qlen(cl->cl_q) == 0);
924 update_vf(cl, 0, 0); /* remove cl from the actlist */
929 set_active(struct hfsc_class *cl, int len)
931 if (cl->cl_rsc != NULL)
933 if (cl->cl_fsc != NULL)
936 cl->cl_stats.period++;
940 set_passive(struct hfsc_class *cl)
942 if (cl->cl_rsc != NULL)
946 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
947 * needs to be called explicitly to remove a class from actlist
952 init_ed(struct hfsc_class *cl, int next_len)
956 cur_time = read_machclk();
958 /* update the deadline curve */
959 rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);
962 * update the eligible curve.
963 * for concave, it is equal to the deadline curve.
964 * for convex, it is a linear curve with slope m2.
966 cl->cl_eligible = cl->cl_deadline;
967 if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
968 cl->cl_eligible.dx = 0;
969 cl->cl_eligible.dy = 0;
972 /* compute e and d */
973 cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
974 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
980 update_ed(struct hfsc_class *cl, int next_len)
982 cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
983 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
989 update_d(struct hfsc_class *cl, int next_len)
991 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
995 init_vf(struct hfsc_class *cl, int len)
997 struct hfsc_class *max_cl, *p;
998 u_int64_t vt, f, cur_time;
1003 for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
1005 if (go_active && cl->cl_nactive++ == 0)
1011 max_cl = actlist_last(cl->cl_parent->cl_actc);
1012 if (max_cl != NULL) {
1014 * set vt to the average of the min and max
1015 * classes. if the parent's period didn't
1016 * change, don't decrease vt of the class.
1019 if (cl->cl_parent->cl_cvtmin != 0)
1020 vt = (cl->cl_parent->cl_cvtmin + vt)/2;
1022 if (cl->cl_parent->cl_vtperiod !=
1023 cl->cl_parentperiod || vt > cl->cl_vt)
1027 * first child for a new parent backlog period.
1028 * add parent's cvtmax to vtoff of children
1029 * to make a new vt (vtoff + vt) larger than
1030 * the vt in the last period for all children.
1032 vt = cl->cl_parent->cl_cvtmax;
1033 for (p = cl->cl_parent->cl_children; p != NULL;
1037 cl->cl_parent->cl_cvtmax = 0;
1038 cl->cl_parent->cl_cvtmin = 0;
1040 cl->cl_initvt = cl->cl_vt;
1042 /* update the virtual curve */
1043 vt = cl->cl_vt + cl->cl_vtoff;
1044 rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
1045 if (cl->cl_virtual.x == vt) {
1046 cl->cl_virtual.x -= cl->cl_vtoff;
1051 cl->cl_vtperiod++; /* increment vt period */
1052 cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
1053 if (cl->cl_parent->cl_nactive == 0)
1054 cl->cl_parentperiod++;
1059 if (cl->cl_usc != NULL) {
1060 /* class has upper limit curve */
1062 cur_time = read_machclk();
1064 /* update the ulimit curve */
1065 rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
1068 cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
1074 if (cl->cl_myf > cl->cl_cfmin)
1078 if (f != cl->cl_f) {
1080 update_cfmin(cl->cl_parent);
1086 update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
1088 u_int64_t f, myf_bound, delta;
1091 go_passive = qempty(cl->cl_q);
1093 for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
1095 cl->cl_total += len;
1097 if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
1100 if (go_passive && --cl->cl_nactive == 0)
1106 /* no more active child, going passive */
1108 /* update cvtmax of the parent class */
1109 if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
1110 cl->cl_parent->cl_cvtmax = cl->cl_vt;
1112 /* remove this class from the vt list */
1115 update_cfmin(cl->cl_parent);
1123 cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
1124 - cl->cl_vtoff + cl->cl_vtadj;
1127 * if vt of the class is smaller than cvtmin,
1128 * the class was skipped in the past due to non-fit.
1129 * if so, we need to adjust vtadj.
1131 if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
1132 cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
1133 cl->cl_vt = cl->cl_parent->cl_cvtmin;
1136 /* update the vt list */
1139 if (cl->cl_usc != NULL) {
1140 cl->cl_myf = cl->cl_myfadj
1141 + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
1144 * if myf lags behind by more than one clock tick
1145 * from the current time, adjust myfadj to prevent
1146 * a rate-limited class from going greedy.
1147 * in a steady state under rate-limiting, myf
1148 * fluctuates within one clock tick.
1150 myf_bound = cur_time - machclk_per_tick;
1151 if (cl->cl_myf < myf_bound) {
1152 delta = cur_time - cl->cl_myf;
1153 cl->cl_myfadj += delta;
1154 cl->cl_myf += delta;
1158 /* cl_f is max(cl_myf, cl_cfmin) */
1159 if (cl->cl_myf > cl->cl_cfmin)
1163 if (f != cl->cl_f) {
1165 update_cfmin(cl->cl_parent);
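/*
 * update_cfmin: recompute cl_cfmin, the smallest fit-time (cl_f) among
 * this class's active children; a class's cl_f is then
 * max(cl_myf, cl_cfmin).
 */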
1171 update_cfmin(struct hfsc_class *cl)
1173 struct hfsc_class *p;
1176 if (TAILQ_EMPTY(cl->cl_actc)) {
1180 cfmin = HT_INFINITY;
1181 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1186 if (p->cl_f < cfmin)
1189 cl->cl_cfmin = cfmin;
1193 * TAILQ based ellist and actlist implementation
1194 * (Ion wanted to make a calendar queue based implementation)
1197 * eligible list holds backlogged classes being sorted by their eligible times.
1198 * there is one eligible list per interface.
1206 MALLOC(head, ellist_t *, sizeof(ellist_t), M_DEVBUF, M_WAITOK);
1212 ellist_destroy(ellist_t *head)
1214 FREE(head, M_DEVBUF);
1218 ellist_insert(struct hfsc_class *cl)
1220 struct hfsc_if *hif = cl->cl_hif;
1221 struct hfsc_class *p;
1223 /* check the last entry first */
1224 if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
1225 p->cl_e <= cl->cl_e) {
1226 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
1230 TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
1231 if (cl->cl_e < p->cl_e) {
1232 TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1236 ASSERT(0); /* should not reach here */
1240 ellist_remove(struct hfsc_class *cl)
1242 struct hfsc_if *hif = cl->cl_hif;
1244 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1248 ellist_update(struct hfsc_class *cl)
1250 struct hfsc_if *hif = cl->cl_hif;
1251 struct hfsc_class *p, *last;
1254 * the eligible time of a class increases monotonically.
1255 * if the next entry has a larger eligible time, nothing to do.
1257 p = TAILQ_NEXT(cl, cl_ellist);
1258 if (p == NULL || cl->cl_e <= p->cl_e)
1261 /* check the last entry */
1262 last = TAILQ_LAST(hif->hif_eligible, _eligible);
1263 ASSERT(last != NULL);
1264 if (last->cl_e <= cl->cl_e) {
1265 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1266 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
1271 * the new position must be between the next entry
1272 * and the last entry
1274 while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
1275 if (cl->cl_e < p->cl_e) {
1276 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1277 TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1281 ASSERT(0); /* should not reach here */
1284 /* find the class with the minimum deadline among the eligible classes */
1286 ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
1288 struct hfsc_class *p, *cl = NULL;
1290 TAILQ_FOREACH(p, head, cl_ellist) {
1291 if (p->cl_e > cur_time)
1293 if (cl == NULL || p->cl_d < cl->cl_d)
1300 * active children list holds backlogged child classes being sorted
1301 * by their virtual time.
1302 * each intermediate class has one active children list.
1309 MALLOC(head, actlist_t *, sizeof(actlist_t), M_DEVBUF, M_WAITOK);
1315 actlist_destroy(actlist_t *head)
1317 FREE(head, M_DEVBUF);
1320 actlist_insert(struct hfsc_class *cl)
1322 struct hfsc_class *p;
1324 /* check the last entry first */
1325 if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
1326 || p->cl_vt <= cl->cl_vt) {
1327 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
1331 TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
1332 if (cl->cl_vt < p->cl_vt) {
1333 TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1337 ASSERT(0); /* should not reach here */
1341 actlist_remove(struct hfsc_class *cl)
1343 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1347 actlist_update(struct hfsc_class *cl)
1349 struct hfsc_class *p, *last;
1352 * the virtual time of a class increases monotonically during its
1353 * backlogged period.
1354 * if the next entry has a larger virtual time, nothing to do.
1356 p = TAILQ_NEXT(cl, cl_actlist);
1357 if (p == NULL || cl->cl_vt < p->cl_vt)
1360 /* check the last entry */
1361 last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
1362 ASSERT(last != NULL);
1363 if (last->cl_vt <= cl->cl_vt) {
1364 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1365 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
1370 * the new position must be between the next entry
1371 * and the last entry
1373 while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
1374 if (cl->cl_vt < p->cl_vt) {
1375 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1376 TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1380 ASSERT(0); /* should not reach here */
1383 static struct hfsc_class *
1384 actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
1386 struct hfsc_class *p;
1388 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1389 if (p->cl_f <= cur_time)
1396 * service curve support functions
1398 * external service curve parameters
1399 * m: bits/sec
1400 * d: msec
1401 * internal service curve parameters
1402 * sm: (bytes/tsc_interval) << SM_SHIFT
1403 * ism: (tsc_count/byte) << ISM_SHIFT
1404 * dx: tsc_count
1406 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
1407 * we should be able to handle 100K-1Gbps link speed with 200MHz-1GHz CPU
1408 * speed. SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
1409 * digits in decimal using the following table.
1411 *  bits/sec     100Kbps   1Mbps     10Mbps    100Mbps    1Gbps
1412 *  -----------+-----------------------------------------------------
1413 *  bytes/nsec   12.5e-6   125e-6    1250e-6   12500e-6   125000e-6
1414 *  sm(500MHz)   25.0e-6   250e-6    2500e-6   25000e-6   250000e-6
1415 *  sm(200MHz)   62.5e-6   625e-6    6250e-6   62500e-6   625000e-6
1416 *
1417 *  nsec/byte    80000     8000      800       80         8
1418 *  ism(500MHz)  40000     4000      400       40         4
1419 *  ism(200MHz)  16000     1600      160       16         1.6
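 *
 * e.g., a 100Mbps class on a 500MHz machine clock carries 0.025 bytes
 * per tick, so m2sm() stores 0.025 scaled up by SM_SHIFT bits and
 * m2ism() stores the inverse, 40 ticks per byte scaled up by ISM_SHIFT
 * bits, matching the table above.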
1422 #define ISM_SHIFT 10
1424 #define SM_MASK ((1LL << SM_SHIFT) - 1)
1425 #define ISM_MASK ((1LL << ISM_SHIFT) - 1)
1427 static __inline u_int64_t
1428 seg_x2y(u_int64_t x, u_int64_t sm)
1434 * y = x * sm >> SM_SHIFT
1435 * but divide it for the upper and lower bits to avoid overflow
1437 y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
1441 static __inline u_int64_t
1442 seg_y2x(u_int64_t y, u_int64_t ism)
1448 else if (ism == HT_INFINITY)
1451 x = (y >> ISM_SHIFT) * ism
1452 + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
1457 static __inline u_int64_t
1462 sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
1466 static __inline u_int64_t
1474 ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
1478 static __inline u_int64_t
1483 dx = ((u_int64_t)d * machclk_freq) / 1000;
1492 m = (sm * 8 * machclk_freq) >> SM_SHIFT;
1501 d = dx * 1000 / machclk_freq;
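/*
 * sc2isc: convert an external service curve (m1 [bits/sec], d [msec],
 * m2 [bits/sec]) -- slope m1 for the first d milliseconds of a
 * backlogged period, slope m2 afterwards -- into the internal
 * machine-clock representation described above.
 */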
1506 sc2isc(struct service_curve *sc, struct internal_sc *isc)
1508 isc->sm1 = m2sm(sc->m1);
1509 isc->ism1 = m2ism(sc->m1);
1510 isc->dx = d2dx(sc->d);
1511 isc->dy = seg_x2y(isc->dx, isc->sm1);
1512 isc->sm2 = m2sm(sc->m2);
1513 isc->ism2 = m2ism(sc->m2);
1517 * initialize the runtime service curve with the given internal
1518 * service curve starting at (x, y).
1521 rtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
1526 rtsc->sm1 = isc->sm1;
1527 rtsc->ism1 = isc->ism1;
1530 rtsc->sm2 = isc->sm2;
1531 rtsc->ism2 = isc->ism2;
1535 * calculate the x-projection (time) of the runtime service curve for
1536 * the given y-projection (amount of service) value
1539 rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
1545 else if (y <= rtsc->y + rtsc->dy) {
1546 /* x belongs to the 1st segment */
1548 x = rtsc->x + rtsc->dx;
1550 x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
1552 /* x belongs to the 2nd segment */
1553 x = rtsc->x + rtsc->dx
1554 + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
1560 rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
1566 else if (x <= rtsc->x + rtsc->dx)
1567 /* y belongs to the 1st segment */
1568 y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
1570 /* y belongs to the 2nd segment */
1571 y = rtsc->y + rtsc->dy
1572 + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
1577 * update the runtime service curve by taking the minimum of the current
1578 * runtime service curve and the service curve starting at (x, y).
1581 rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
1584 u_int64_t y1, y2, dx, dy;
1586 if (isc->sm1 <= isc->sm2) {
1587 /* service curve is convex */
1588 y1 = rtsc_x2y(rtsc, x);
1590 /* the current rtsc is smaller */
1598 * service curve is concave
1599 * compute the two y values of the current rtsc
1603 y1 = rtsc_x2y(rtsc, x);
1605 /* rtsc is below isc, no change to rtsc */
1609 y2 = rtsc_x2y(rtsc, x + isc->dx);
1610 if (y2 >= y + isc->dy) {
1611 /* rtsc is above isc, replace rtsc by isc */
1620 * the two curves intersect
1621 * compute the offsets (dx, dy) using the reverse
1622 * function of seg_x2y()
1623 * seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
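 * i.e., dx*sm1 == dx*sm2 + ((y1 - y) << SM_SHIFT), which gives
 * dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2).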
1625 dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
1627 * check if (x, y1) belongs to the 1st segment of rtsc.
1628 * if so, add the offset.
1630 if (rtsc->x + rtsc->dx > x)
1631 dx += rtsc->x + rtsc->dx - x;
1632 dy = seg_x2y(dx, isc->sm1);
1642 get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
1644 sp->class_id = cl->cl_id;
1645 sp->class_handle = cl->cl_handle;
1647 if (cl->cl_rsc != NULL) {
1648 sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
1649 sp->rsc.d = dx2d(cl->cl_rsc->dx);
1650 sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
1656 if (cl->cl_fsc != NULL) {
1657 sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
1658 sp->fsc.d = dx2d(cl->cl_fsc->dx);
1659 sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
1665 if (cl->cl_usc != NULL) {
1666 sp->usc.m1 = sm2m(cl->cl_usc->sm1);
1667 sp->usc.d = dx2d(cl->cl_usc->dx);
1668 sp->usc.m2 = sm2m(cl->cl_usc->sm2);
1675 sp->total = cl->cl_total;
1676 sp->cumul = cl->cl_cumul;
1683 sp->initvt = cl->cl_initvt;
1684 sp->vtperiod = cl->cl_vtperiod;
1685 sp->parentperiod = cl->cl_parentperiod;
1686 sp->nactive = cl->cl_nactive;
1687 sp->vtoff = cl->cl_vtoff;
1688 sp->cvtmax = cl->cl_cvtmax;
1689 sp->myf = cl->cl_myf;
1690 sp->cfmin = cl->cl_cfmin;
1691 sp->cvtmin = cl->cl_cvtmin;
1692 sp->myfadj = cl->cl_myfadj;
1693 sp->vtadj = cl->cl_vtadj;
1695 sp->cur_time = read_machclk();
1696 sp->machclk_freq = machclk_freq;
1698 sp->qlength = qlen(cl->cl_q);
1699 sp->qlimit = qlimit(cl->cl_q);
1700 sp->xmit_cnt = cl->cl_stats.xmit_cnt;
1701 sp->drop_cnt = cl->cl_stats.drop_cnt;
1702 sp->period = cl->cl_stats.period;
1704 sp->qtype = qtype(cl->cl_q);
1706 if (q_is_red(cl->cl_q))
1707 red_getstats(cl->cl_red, &sp->red[0]);
1710 if (q_is_rio(cl->cl_q))
1711 rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
1715 /* convert a class handle to the corresponding class pointer */
1716 static struct hfsc_class *
1717 clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
1720 struct hfsc_class *cl;
1725 * first, try optimistically the slot matching the lower bits of
1726 * the handle. if it fails, do the linear table search.
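 * (hfsc_class_create stores a class in the slot matching the low bits
 * of its handle whenever that slot is free, so the fast path normally
 * succeeds.)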
1728 i = chandle % HFSC_MAX_CLASSES;
1729 if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
1731 for (i = 0; i < HFSC_MAX_CLASSES; i++)
1732 if ((cl = hif->hif_class_tbl[i]) != NULL &&
1733 cl->cl_handle == chandle)
1739 static struct hfsc_if *
1740 hfsc_attach(ifq, bandwidth)
1744 struct hfsc_if *hif;
1746 MALLOC(hif, struct hfsc_if *, sizeof(struct hfsc_if),
1747 M_DEVBUF, M_WAITOK);
1750 bzero(hif, sizeof(struct hfsc_if));
1752 hif->hif_eligible = ellist_alloc();
1753 if (hif->hif_eligible == NULL) {
1754 FREE(hif, M_DEVBUF);
1760 /* add this state to the hfsc list */
1761 hif->hif_next = hif_list;
1769 struct hfsc_if *hif;
1771 (void)hfsc_clear_interface(hif);
1772 (void)hfsc_class_destroy(hif->hif_rootclass);
1774 /* remove this interface from the hif list */
1775 if (hif_list == hif)
1776 hif_list = hif->hif_next;
1780 for (h = hif_list; h != NULL; h = h->hif_next)
1781 if (h->hif_next == hif) {
1782 h->hif_next = hif->hif_next;
1788 ellist_destroy(hif->hif_eligible);
1790 FREE(hif, M_DEVBUF);
1796 hfsc_class_modify(cl, rsc, fsc, usc)
1797 struct hfsc_class *cl;
1798 struct service_curve *rsc, *fsc, *usc;
1800 struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
1804 rsc_tmp = fsc_tmp = usc_tmp = NULL;
1805 if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
1806 cl->cl_rsc == NULL) {
1807 MALLOC(rsc_tmp, struct internal_sc *,
1808 sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
1809 if (rsc_tmp == NULL)
1812 if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
1813 cl->cl_fsc == NULL) {
1814 MALLOC(fsc_tmp, struct internal_sc *,
1815 sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
1816 if (fsc_tmp == NULL)
1819 if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
1820 cl->cl_usc == NULL) {
1821 MALLOC(usc_tmp, struct internal_sc *,
1822 sizeof(struct internal_sc), M_DEVBUF, M_WAITOK);
1823 if (usc_tmp == NULL)
1827 cur_time = read_machclk();
1833 IFQ_LOCK(cl->cl_hif->hif_ifq);
1836 if (rsc->m1 == 0 && rsc->m2 == 0) {
1837 if (cl->cl_rsc != NULL) {
1838 if (!qempty(cl->cl_q))
1840 FREE(cl->cl_rsc, M_DEVBUF);
1844 if (cl->cl_rsc == NULL)
1845 cl->cl_rsc = rsc_tmp;
1846 sc2isc(rsc, cl->cl_rsc);
1847 rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
1849 cl->cl_eligible = cl->cl_deadline;
1850 if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
1851 cl->cl_eligible.dx = 0;
1852 cl->cl_eligible.dy = 0;
1858 if (fsc->m1 == 0 && fsc->m2 == 0) {
1859 if (cl->cl_fsc != NULL) {
1860 if (!qempty(cl->cl_q))
1862 FREE(cl->cl_fsc, M_DEVBUF);
1866 if (cl->cl_fsc == NULL)
1867 cl->cl_fsc = fsc_tmp;
1868 sc2isc(fsc, cl->cl_fsc);
1869 rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
1875 if (usc->m1 == 0 && usc->m2 == 0) {
1876 if (cl->cl_usc != NULL) {
1877 FREE(cl->cl_usc, M_DEVBUF);
1882 if (cl->cl_usc == NULL)
1883 cl->cl_usc = usc_tmp;
1884 sc2isc(usc, cl->cl_usc);
1885 rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
1890 if (!qempty(cl->cl_q)) {
1891 if (cl->cl_rsc != NULL)
1892 update_ed(cl, m_pktlen(qhead(cl->cl_q)));
1893 if (cl->cl_fsc != NULL)
1894 update_vf(cl, 0, cur_time);
1895 /* is this enough? */
1898 IFQ_UNLOCK(cl->cl_hif->hif_ifq);
1905 * hfsc device interface
1908 hfscopen(dev, flag, fmt, p)
1911 #if (__FreeBSD_version > 500000)
1917 if (machclk_freq == 0)
1920 if (machclk_freq == 0) {
1921 printf("hfsc: no cpu clock available!\n");
1925 /* everything will be done when the queueing scheme is attached. */
1930 hfscclose(dev, flag, fmt, p)
1933 #if (__FreeBSD_version > 500000)
1939 struct hfsc_if *hif;
1942 while ((hif = hif_list) != NULL) {
1944 if (ALTQ_IS_ENABLED(hif->hif_ifq))
1945 altq_disable(hif->hif_ifq);
1947 err = altq_detach(hif->hif_ifq);
1949 err = hfsc_detach(hif);
1950 if (err != 0 && error == 0)
1958 hfscioctl(dev, cmd, addr, flag, p)
1963 #if (__FreeBSD_version > 500000)
1969 struct hfsc_if *hif;
1970 struct hfsc_interface *ifacep;
1973 /* check super-user privilege */
1978 #if (__FreeBSD_version > 700000)
1979 if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
1981 #elif (__FreeBSD_version > 400000)
1982 if ((error = suser(p)) != 0)
1985 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
1993 case HFSC_IF_ATTACH:
1994 error = hfsccmd_if_attach((struct hfsc_attach *)addr);
1997 case HFSC_IF_DETACH:
1998 error = hfsccmd_if_detach((struct hfsc_interface *)addr);
2003 case HFSC_CLEAR_HIERARCHY:
2004 ifacep = (struct hfsc_interface *)addr;
2005 if ((hif = altq_lookup(ifacep->hfsc_ifname,
2006 ALTQT_HFSC)) == NULL) {
2014 if (hif->hif_defaultclass == NULL) {
2016 printf("hfsc: no default class\n");
2021 error = altq_enable(hif->hif_ifq);
2025 error = altq_disable(hif->hif_ifq);
2028 case HFSC_CLEAR_HIERARCHY:
2029 hfsc_clear_interface(hif);
2034 case HFSC_ADD_CLASS:
2035 error = hfsccmd_add_class((struct hfsc_add_class *)addr);
2038 case HFSC_DEL_CLASS:
2039 error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
2042 case HFSC_MOD_CLASS:
2043 error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
2046 case HFSC_ADD_FILTER:
2047 error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
2050 case HFSC_DEL_FILTER:
2051 error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
2055 error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
2066 hfsccmd_if_attach(ap)
2067 struct hfsc_attach *ap;
2069 struct hfsc_if *hif;
2073 if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
2076 if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
2080 * set HFSC to this ifnet structure.
2082 if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
2083 hfsc_enqueue, hfsc_dequeue, hfsc_request,
2084 &hif->hif_classifier, acc_classify)) != 0)
2085 (void)hfsc_detach(hif);
2091 hfsccmd_if_detach(ap)
2092 struct hfsc_interface *ap;
2094 struct hfsc_if *hif;
2097 if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
2100 if (ALTQ_IS_ENABLED(hif->hif_ifq))
2101 altq_disable(hif->hif_ifq);
2103 if ((error = altq_detach(hif->hif_ifq)))
2106 return hfsc_detach(hif);
2110 hfsccmd_add_class(ap)
2111 struct hfsc_add_class *ap;
2113 struct hfsc_if *hif;
2114 struct hfsc_class *cl, *parent;
2117 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2120 if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
2121 hif->hif_rootclass == NULL)
2123 else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
2126 /* assign a class handle (use a free slot number for now) */
2127 for (i = 1; i < HFSC_MAX_CLASSES; i++)
2128 if (hif->hif_class_tbl[i] == NULL)
2130 if (i == HFSC_MAX_CLASSES)
2133 if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
2134 parent, ap->qlimit, ap->flags, i)) == NULL)
2137 /* return a class handle to the user */
2138 ap->class_handle = i;
2144 hfsccmd_delete_class(ap)
2145 struct hfsc_delete_class *ap;
2147 struct hfsc_if *hif;
2148 struct hfsc_class *cl;
2150 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2153 if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2156 return hfsc_class_destroy(cl);
2160 hfsccmd_modify_class(ap)
2161 struct hfsc_modify_class *ap;
2163 struct hfsc_if *hif;
2164 struct hfsc_class *cl;
2165 struct service_curve *rsc = NULL;
2166 struct service_curve *fsc = NULL;
2167 struct service_curve *usc = NULL;
2169 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2172 if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2175 if (ap->sctype & HFSC_REALTIMESC)
2176 rsc = &ap->service_curve;
2177 if (ap->sctype & HFSC_LINKSHARINGSC)
2178 fsc = &ap->service_curve;
2179 if (ap->sctype & HFSC_UPPERLIMITSC)
2180 usc = &ap->service_curve;
2182 return hfsc_class_modify(cl, rsc, fsc, usc);
2186 hfsccmd_add_filter(ap)
2187 struct hfsc_add_filter *ap;
2189 struct hfsc_if *hif;
2190 struct hfsc_class *cl;
2192 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2195 if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2198 if (is_a_parent_class(cl)) {
2200 printf("hfsccmd_add_filter: not a leaf class!\n");
2205 return acc_add_filter(&hif->hif_classifier, &ap->filter,
2206 cl, &ap->filter_handle);
2210 hfsccmd_delete_filter(ap)
2211 struct hfsc_delete_filter *ap;
2213 struct hfsc_if *hif;
2215 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2218 return acc_delete_filter(&hif->hif_classifier,
2223 hfsccmd_class_stats(ap)
2224 struct hfsc_class_stats *ap;
2226 struct hfsc_if *hif;
2227 struct hfsc_class *cl;
2228 struct hfsc_classstats stats, *usp;
2229 int n, nclasses, error;
2231 if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2234 ap->cur_time = read_machclk();
2235 ap->machclk_freq = machclk_freq;
2236 ap->hif_classes = hif->hif_classes;
2237 ap->hif_packets = hif->hif_packets;
2239 /* skip the first N classes in the tree */
2240 nclasses = ap->nskip;
2241 for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
2242 cl = hfsc_nextclass(cl), n++)
2247 /* then, read the next N classes in the tree */
2248 nclasses = ap->nclasses;
2250 for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
2252 get_class_stats(&stats, cl);
2254 if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
2255 sizeof(stats))) != 0)
2266 static struct altqsw hfsc_sw =
2267 {"hfsc", hfscopen, hfscclose, hfscioctl};
2269 ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
2270 MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
2271 MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);
2273 #endif /* KLD_MODULE */
2274 #endif /* ALTQ3_COMPAT */
2276 #endif /* ALTQ_HFSC */