2 * Copyright (c) 1991-1997 Regents of the University of California.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the Network Research
16 * Group at Lawrence Berkeley Laboratory.
17 * 4. Neither the name of the University nor of the Laboratory may be used
18 * to endorse or promote products derived from this software without
19 * specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * LBL code modified by speer@eng.sun.com, May 1997.
34 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
36 * @(#)rm_class.c 1.48 97/12/05 SMI
37 * $KAME: altq_rmclass.c,v 1.19 2005/04/13 03:44:25 suz Exp $
42 #include "opt_inet6.h"
43 #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
45 #include <sys/param.h>
46 #include <sys/malloc.h>
48 #include <sys/socket.h>
49 #include <sys/systm.h>
50 #include <sys/errno.h>
54 #include <net/if_var.h>
56 #include <net/altq/if_altq.h>
57 #include <net/altq/altq.h>
58 #include <net/altq/altq_codel.h>
59 #include <net/altq/altq_rmclass.h>
60 #include <net/altq/altq_rmclass_debug.h>
61 #include <net/altq/altq_red.h>
62 #include <net/altq/altq_rio.h>
68 #define reset_cutoff(ifd) { ifd->cutoff_ = RM_MAXDEPTH; }
74 static int rmc_satisfied(struct rm_class *, struct timeval *);
75 static void rmc_wrr_set_weights(struct rm_ifdat *);
76 static void rmc_depth_compute(struct rm_class *);
77 static void rmc_depth_recompute(rm_class_t *);
79 static mbuf_t *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
80 static mbuf_t *_rmc_prr_dequeue_next(struct rm_ifdat *, int);
82 static int _rmc_addq(rm_class_t *, mbuf_t *);
83 static void _rmc_dropq(rm_class_t *);
84 static mbuf_t *_rmc_getq(rm_class_t *);
85 static mbuf_t *_rmc_pollq(rm_class_t *);
87 static int rmc_under_limit(struct rm_class *, struct timeval *);
88 static void rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
89 static void rmc_drop_action(struct rm_class *);
90 static void rmc_restart(struct rm_class *);
91 static void rmc_root_overlimit(struct rm_class *, struct rm_class *);
93 #define BORROW_OFFTIME
95 * BORROW_OFFTIME (experimental):
96 * borrow the offtime of the class being borrowed from.
97 * the reason is that when its own offtime is set, the class is unable
98 * to borrow much, especially when cutoff is taking effect.
99 * but when the borrowed class is overloaded (avgidle is close to minidle),
100 * use the borrowing class's offtime to avoid overload.
102 #define ADJUST_CUTOFF
104 * ADJUST_CUTOFF (experimental):
105 * if no underlimit class is found due to cutoff, increase cutoff and
106 * retry the scheduling loop.
107 * also, don't invoke delay_actions while cutoff is taking effect,
108 * since a sleeping class won't have a chance to be scheduled in the
111 * now the heuristics for setting the top-level variable (cutoff_) become:
112 * 1. if a packet arrives for a not-overlimit class, set cutoff
113 * to the depth of the class.
114 * 2. if cutoff is i, and a packet arrives for an overlimit class
115 * with an underlimit ancestor at a lower level than i (say j),
116 * then set cutoff to j.
117 * 3. at scheduling a packet, if there is no underlimit class
118 * due to the current cutoff level, increase cutoff by 1 and
119 * then try to schedule again.
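 *
 * for example, starting from a freshly reset cutoff_ (RM_MAXDEPTH) on a
 * two-level tree (root at depth 2, agency classes at depth 1, leaves at
 * depth 0): a packet for an overlimit leaf whose agency parent is
 * underlimit lowers cutoff_ to 1 (rule 2), allowing borrowing up to the
 * agency level but not from the root; a later packet for an underlimit
 * leaf lowers cutoff_ to 0 (rule 1), shutting off borrowing entirely;
 * and if the scheduler then finds nothing underlimit at or below
 * cutoff_, rule 3 raises cutoff_ one level at a time until some class
 * can send.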
124 * rmc_newclass(...) - Create a new resource management class at priority
125 * 'pri' on the interface given by 'ifd'.
127 * nsecPerByte is the data rate of the interface in nanoseconds/byte.
128 * E.g., 800 for a 10Mb/s ethernet. If the class gets less
129 * than 100% of the bandwidth, this number should be the
130 * 'effective' rate for the class. Let f be the
131 * bandwidth fraction allocated to this class, and let
132 * nsPerByte be the data rate of the output link in
133 * nanoseconds/byte. Then nsecPerByte is set to
134 * nsPerByte / f. E.g., 1600 (= 800 / .5)
135 * for a class that gets 50% of an ethernet's bandwidth.
137 * action the routine to call when the class is over limit.
139 * maxq max allowable queue size for class (in packets).
141 * parent parent class pointer.
143 * borrow class to borrow from (should be either 'parent' or null).
145 * maxidle max value allowed for class 'idle' time estimate (this
146 * parameter determines how large an initial burst of packets
147 * can be before the overlimit action is invoked).
149 * offtime how long 'delay' action will delay when class goes over
150 * limit (this parameter determines the steady-state burst
151 * size when a class is running over its limit).
153 * Maxidle and offtime have to be computed from the following: If the
154 * average packet size is s, the bandwidth fraction allocated to this
155 * class is f, we want to allow b packet bursts, and the gain of the
156 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
158 * ptime = s * nsPerByte * (1 - f) / f
159 * maxidle = ptime * (1 - g^b) / g^b
160 * minidle = -ptime * (1 / (f - 1))
161 * offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
163 * Operationally, it's convenient to specify maxidle & offtime in units
164 * independent of the link bandwidth so the maxidle & offtime passed to
165 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
166 * (The constant factor is a scale factor needed to make the parameters
167 * integers. This scaling also means that the 'unscaled' values of
168 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
169 * not nanoseconds.) Also note that the 'idle' filter computation keeps
170 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
171 * maxidle also must be scaled upward by this value. Thus, the passed
172 * values for maxidle and offtime can be computed as follows:
174 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
175 * offtime = offtime * 8 / (1000 * nsecPerByte)
177 * When USE_HRTIME is employed, then maxidle and offtime become:
178 * maxidle = maxidle * (8.0 / nsecPerByte);
179 * offtime = offtime * (8.0 / nsecPerByte);
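 *
 * as a worked example (assuming RM_FILTER_GAIN is 5, so g = 31/32):
 * for a class given half of a 10Mb/s ethernet (nsPerByte = 800, f = .5,
 * so nsecPerByte = 1600), with average packet size s = 1000 bytes and a
 * burst allowance b = 16 packets,
 *
 *	ptime   = 1000 * 800 * (1 - .5) / .5           = 800000 ns
 *	g^b     = (31/32)^16                           ~= .60
 *	maxidle = 800000 * (1 - .60) / .60             ~= 530000 ns
 *
 * and the scaled value passed to this routine is roughly
 *
 *	maxidle = 530000 * 2^5 * 8 / (1000 * 1600)     ~= 85
 *
 * which the code below turns back into maxidle_ = 85 * 1600 / 8 = 17000,
 * i.e. about 530 usec scaled up by 2^RM_FILTER_GAIN, matching the
 * internal representation of the 'idle' estimate.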
182 rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
183 void (*action)(rm_class_t *, rm_class_t *), int maxq,
184 struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
185 int minidle, u_int offtime, int pktsize, int flags)
188 struct rm_class *peer;
191 if (pri >= RM_MAXPRIO)
194 if (flags & RMCF_RED) {
196 printf("rmc_newclass: RED not configured for CBQ!\n");
202 if (flags & RMCF_RIO) {
204 printf("rmc_newclass: RIO not configured for CBQ!\n");
210 if (flags & RMCF_CODEL) {
212 printf("rmc_newclass: CODEL not configured for CBQ!\n");
218 cl = malloc(sizeof(struct rm_class), M_DEVBUF, M_NOWAIT | M_ZERO);
221 CALLOUT_INIT(&cl->callout_);
222 cl->q_ = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
223 if (cl->q_ == NULL) {
229 * Class initialization.
231 cl->children_ = NULL;
232 cl->parent_ = parent;
233 cl->borrow_ = borrow;
237 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
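	/*
	 * e.g. with RM_NS_PER_SEC (10^9 ns) and nsecPerByte = 1600 (half
	 * of a 10Mb/s link), allotment_ = 625000 bytes/sec, i.e. 5Mb/s.
	 */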
240 cl->ns_per_byte_ = nsecPerByte;
242 qlimit(cl->q_) = maxq;
243 qtype(cl->q_) = Q_DROPHEAD;
247 #if 1 /* minidle is also scaled in ALTQ */
248 cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
249 if (cl->minidle_ > 0)
252 cl->minidle_ = minidle;
254 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
255 if (cl->maxidle_ == 0)
257 #if 1 /* offtime is also scaled in ALTQ */
258 cl->avgidle_ = cl->maxidle_;
259 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
260 if (cl->offtime_ == 0)
264 cl->offtime_ = (offtime * nsecPerByte) / 8;
266 cl->overlimit = action;
269 if (flags & (RMCF_RED|RMCF_RIO)) {
270 int red_flags, red_pkttime;
273 if (flags & RMCF_ECN)
274 red_flags |= REDF_ECN;
275 if (flags & RMCF_FLOWVALVE)
276 red_flags |= REDF_FLOWVALVE;
278 if (flags & RMCF_CLEARDSCP)
279 red_flags |= RIOF_CLEARDSCP;
281 red_pkttime = nsecPerByte * pktsize / 1000;
283 if (flags & RMCF_RED) {
284 cl->red_ = red_alloc(0, 0,
285 qlimit(cl->q_) * 10/100,
286 qlimit(cl->q_) * 30/100,
287 red_flags, red_pkttime);
288 if (cl->red_ != NULL)
289 qtype(cl->q_) = Q_RED;
293 cl->red_ = (red_t *)rio_alloc(0, NULL,
294 red_flags, red_pkttime);
295 if (cl->red_ != NULL)
296 qtype(cl->q_) = Q_RIO;
300 #endif /* ALTQ_RED */
302 if (flags & RMCF_CODEL) {
303 cl->codel_ = codel_alloc(5, 100, 0);
304 if (cl->codel_ != NULL)
305 qtype(cl->q_) = Q_CODEL;
310 * put the class into the class tree
314 if ((peer = ifd->active_[pri]) != NULL) {
315 /* find the last class at this pri */
317 while (peer->peer_ != ifd->active_[pri])
321 ifd->active_[pri] = cl;
326 cl->next_ = parent->children_;
327 parent->children_ = cl;
332 * Compute the depth of this class and its ancestors in the class
335 rmc_depth_compute(cl);
338 * If CBQ's WRR is enabled, then initialize the class WRR state.
342 ifd->alloc_[pri] += cl->allotment_;
343 rmc_wrr_set_weights(ifd);
345 IFQ_UNLOCK(ifd->ifq_);
351 rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
352 int minidle, u_int offtime, int pktsize)
354 struct rm_ifdat *ifd;
359 old_allotment = cl->allotment_;
363 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
365 cl->ns_per_byte_ = nsecPerByte;
367 qlimit(cl->q_) = maxq;
369 #if 1 /* minidle is also scaled in ALTQ */
370 cl->minidle_ = (minidle * nsecPerByte) / 8;
371 if (cl->minidle_ > 0)
374 cl->minidle_ = minidle;
376 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
377 if (cl->maxidle_ == 0)
379 #if 1 /* offtime is also scaled in ALTQ */
380 cl->avgidle_ = cl->maxidle_;
381 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
382 if (cl->offtime_ == 0)
386 cl->offtime_ = (offtime * nsecPerByte) / 8;
390 * If CBQ's WRR is enabled, then initialize the class WRR state.
393 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
394 rmc_wrr_set_weights(ifd);
396 IFQ_UNLOCK(ifd->ifq_);
403 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
404 * the appropriate round robin weights for the CBQ weighted round robin
411 rmc_wrr_set_weights(struct rm_ifdat *ifd)
414 struct rm_class *cl, *clh;
416 for (i = 0; i < RM_MAXPRIO; i++) {
418 * This is inverted from that of the simulator to
419 * maintain precision.
421 if (ifd->num_[i] == 0)
424 ifd->M_[i] = ifd->alloc_[i] /
425 (ifd->num_[i] * ifd->maxpkt_);
427 * Compute the weighted allotment for each class.
428 * This takes the expensive div instruction out
429 * of the main loop for the wrr scheduling path.
430 * These only get recomputed when a class comes or
433 if (ifd->active_[i] != NULL) {
434 clh = cl = ifd->active_[i];
436 /* safe-guard for slow link or alloc_ == 0 */
438 cl->w_allotment_ = 0;
440 cl->w_allotment_ = cl->allotment_ /
443 } while ((cl != NULL) && (cl != clh));
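		/*
		 * illustrative numbers: two classes at one priority with
		 * allotments of 100000 and 50000 bytes/sec give
		 * alloc_ = 150000 and num_ = 2.  with maxpkt_ = 1500,
		 * M_ = 150000 / (2 * 1500) = 50, so the classes get
		 * w_allotment_ of 100000/50 = 2000 and 50000/50 = 1000
		 * bytes per round -- the 2:1 allotment ratio is preserved
		 * and the average per-class quantum equals maxpkt_.
		 */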
449 rmc_get_weight(struct rm_ifdat *ifd, int pri)
451 if ((pri >= 0) && (pri < RM_MAXPRIO))
452 return (ifd->M_[pri]);
459 * rmc_depth_compute(struct rm_class *cl) - This function computes the
460 * appropriate depth of class 'cl' and its ancestors.
466 rmc_depth_compute(struct rm_class *cl)
468 rm_class_t *t = cl, *p;
471 * Recompute the depth for the branch of the tree.
475 if (p && (t->depth_ >= p->depth_)) {
476 p->depth_ = t->depth_ + 1;
485 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
486 * the depth of the tree after a class has been deleted.
492 rmc_depth_recompute(rm_class_t *cl)
499 if ((t = p->children_) == NULL) {
505 if (t->depth_ > cdepth)
510 if (p->depth_ == cdepth + 1)
511 /* no change to this parent */
514 p->depth_ = cdepth + 1;
522 if (cl->depth_ >= 1) {
523 if (cl->children_ == NULL) {
525 } else if ((t = cl->children_) != NULL) {
527 if (t->children_ != NULL)
528 rmc_depth_recompute(t);
532 rmc_depth_compute(cl);
539 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
540 * function deletes a class from the link-sharing structure and frees
541 * all resources associated with the class.
547 rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
549 struct rm_class *p, *head, *previous;
552 ASSERT(cl->children_ == NULL);
555 CALLOUT_STOP(&cl->callout_);
560 * Free packets in the packet queue.
561 * XXX - this may not be a desired behavior. Packets should be
567 * If the class has a parent, then remove the class from the
568 * parent's children chain.
570 if (cl->parent_ != NULL) {
571 head = cl->parent_->children_;
573 if (head->next_ == NULL) {
575 cl->parent_->children_ = NULL;
576 cl->parent_->leaf_ = 1;
577 } else while (p != NULL) {
580 cl->parent_->children_ = cl->next_;
582 previous->next_ = cl->next_;
593 * Delete class from class priority peer list.
595 if ((p = ifd->active_[cl->pri_]) != NULL) {
597 * If there is more than one member of this priority
598 * level, then look for class(cl) in the priority level.
601 while (p->peer_ != cl)
603 p->peer_ = cl->peer_;
605 if (ifd->active_[cl->pri_] == cl)
606 ifd->active_[cl->pri_] = cl->peer_;
609 ifd->active_[cl->pri_] = NULL;
614 * Recompute the WRR weights.
617 ifd->alloc_[cl->pri_] -= cl->allotment_;
618 ifd->num_[cl->pri_]--;
619 rmc_wrr_set_weights(ifd);
623 * Re-compute the depth of the tree.
626 rmc_depth_recompute(cl->parent_);
628 rmc_depth_recompute(ifd->root_);
631 IFQ_UNLOCK(ifd->ifq_);
635 * Free the class structure.
637 if (cl->red_ != NULL) {
639 if (q_is_rio(cl->q_))
640 rio_destroy((rio_t *)cl->red_);
643 if (q_is_red(cl->q_))
644 red_destroy(cl->red_);
647 if (q_is_codel(cl->q_))
648 codel_destroy(cl->codel_);
651 free(cl->q_, M_DEVBUF);
658 * rmc_init(...) - Initialize the resource management data structures
659 * associated with the output portion of interface 'ifp'. 'ifd' is
660 * where the structures will be built (for backwards compatibility, the
661 * structures aren't kept in the ifnet struct). 'nsecPerByte'
662 * gives the link speed (inverse of bandwidth) in nanoseconds/byte.
663 * 'restart' is the driver-specific routine that the generic 'delay
664 * until under limit' action will call to restart output. `maxq'
665 * is the queue size of the 'link' & 'default' classes. 'maxqueued'
666 * is the maximum number of packets that the resource management
667 * code will allow to be queued 'downstream' (this is typically 1).
673 rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
674 void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
675 int minidle, u_int offtime, int flags)
680 * Initialize the CBQ tracing/debug facility.
684 bzero((char *)ifd, sizeof (*ifd));
685 mtu = ifq->altq_ifp->if_mtu;
687 ifd->restart = restart;
688 ifd->maxqueued_ = maxqueued;
689 ifd->ns_per_byte_ = nsecPerByte;
691 ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
692 ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
694 ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
695 if (mtu * nsecPerByte > 10 * 1000000)
696 ifd->maxiftime_ /= 4;
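	/*
	 * e.g. a 1500-byte MTU on a 10Mb/s link (nsecPerByte = 800) takes
	 * 1500 * 800 / 1000 = 1200 usec per packet, so maxiftime_ is 16
	 * MTUs' worth, 19200 usec; only on much slower links (over 10 msec
	 * per MTU) is the bound cut to a quarter of that.
	 */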
700 CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);
703 * Initialize the CBQ's WRR state.
705 for (i = 0; i < RM_MAXPRIO; i++) {
710 ifd->active_[i] = NULL;
714 * Initialize current packet state.
718 for (i = 0; i < RM_MAXQUEUED; i++) {
719 ifd->class_[i] = NULL;
721 ifd->borrowed_[i] = NULL;
725 * Create the root class of the link-sharing structure.
727 if ((ifd->root_ = rmc_newclass(0, ifd,
729 rmc_root_overlimit, maxq, 0, 0,
730 maxidle, minidle, offtime,
732 printf("rmc_init: root class not allocated\n");
735 ifd->root_->depth_ = 0;
740 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
741 * mbuf 'm' to queue for resource class 'cl'. This routine is called
742 * by a driver's if_output routine. This routine must be called with
743 * output packet completion interrupts locked out (to avoid racing with
746 * Returns: 0 on successful queueing
747 * -1 when packet drop occurs
750 rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
753 struct rm_ifdat *ifd = cl->ifdat_;
755 int is_empty = qempty(cl->q_);
758 if (ifd->cutoff_ > 0) {
759 if (TV_LT(&cl->undertime_, &now)) {
760 if (ifd->cutoff_ > cl->depth_)
761 ifd->cutoff_ = cl->depth_;
762 CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
767 * the class is overlimit. if the class has
768 * underlimit ancestors, set cutoff to the lowest
771 struct rm_class *borrow = cl->borrow_;
773 while (borrow != NULL &&
774 borrow->depth_ < ifd->cutoff_) {
775 if (TV_LT(&borrow->undertime_, &now)) {
776 ifd->cutoff_ = borrow->depth_;
777 CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
780 borrow = borrow->borrow_;
784 else if ((ifd->cutoff_ > 1) && cl->borrow_) {
785 if (TV_LT(&cl->borrow_->undertime_, &now)) {
786 ifd->cutoff_ = cl->borrow_->depth_;
787 CBQTRACE(rmc_queue_packet, 'ffob',
788 cl->borrow_->depth_);
794 if (_rmc_addq(cl, m) < 0)
799 CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
803 if (qlen(cl->q_) > qlimit(cl->q_)) {
804 /* note: qlimit can be set to 0 or 1 */
813 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
814 * classes to see if they are satisfied.
818 rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
823 for (i = RM_MAXPRIO - 1; i >= 0; i--) {
824 if ((bp = ifd->active_[i]) != NULL) {
827 if (!rmc_satisfied(p, now)) {
828 ifd->cutoff_ = p->depth_;
840 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
844 rmc_satisfied(struct rm_class *cl, struct timeval *now)
850 if (TV_LT(now, &cl->undertime_))
852 if (cl->depth_ == 0) {
853 if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
858 if (cl->children_ != NULL) {
861 if (!rmc_satisfied(p, now))
871 * Return 1 if class 'cl' is under limit or can borrow from a parent,
872 * 0 if overlimit. As a side-effect, this routine will invoke the
873 * class's overlimit action if the class is overlimit.
877 rmc_under_limit(struct rm_class *cl, struct timeval *now)
881 struct rm_ifdat *ifd = cl->ifdat_;
883 ifd->borrowed_[ifd->qi_] = NULL;
885 * If cl is the root class, then always return that it is
886 * underlimit. Otherwise, check to see if the class is underlimit.
888 if (cl->parent_ == NULL)
892 if (TV_LT(now, &cl->undertime_))
895 CALLOUT_STOP(&cl->callout_);
897 cl->undertime_.tv_sec = 0;
902 while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
903 if (((cl = cl->borrow_) == NULL) ||
904 (cl->depth_ > ifd->cutoff_)) {
907 /* cutoff is taking effect, just
908 return false without calling
912 #ifdef BORROW_OFFTIME
914 * check if the class can borrow offtime too.
915 * borrow offtime from the top of the borrow
916 * chain if the top class is not overloaded.
919 /* cutoff is taking effect, use this class as top. */
921 CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
923 if (top != NULL && top->avgidle_ == top->minidle_)
926 (p->overlimit)(p, top);
929 (p->overlimit)(p, NULL);
937 ifd->borrowed_[ifd->qi_] = cl;
942 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
943 * Packet-by-packet round robin.
945 * The heart of the weighted round-robin scheduler, which decides which
946 * class next gets to send a packet. Highest priority first, then
947 * weighted round-robin within priorities.
949 * Each able-to-send class gets to send until its byte allocation is
950 * exhausted. Thus, the active pointer is only changed after a class has
951 * exhausted its allocation.
953 * If the scheduler finds no class that is underlimit or able to borrow,
954 * then the first class found that had a nonzero queue and is allowed to
955 * borrow gets to send.
959 _rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
961 struct rm_class *cl = NULL, *first = NULL;
970 * if the driver polls the top of the queue and then removes
971 * the polled packet, we must return the same packet.
973 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
974 cl = ifd->pollcache_;
976 if (ifd->efficient_) {
977 /* check if this class is overlimit */
978 if (cl->undertime_.tv_sec != 0 &&
979 rmc_under_limit(cl, &now) == 0)
982 ifd->pollcache_ = NULL;
986 /* mode == ALTDQ_POLL || pollcache == NULL */
987 ifd->pollcache_ = NULL;
988 ifd->borrowed_[ifd->qi_] = NULL;
993 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
994 if (ifd->na_[cpri] == 0)
998 * Loop through twice for a priority level, if some class
999 * was unable to send a packet the first round because
1000 * of the weighted round-robin mechanism.
1001 * During the second loop at this level, deficit==2.
1002 * (This second loop is not needed if for every class,
1003 * "M[cl->pri_])" times "cl->allotment" is greater than
1004 * the byte size for the largest packet in the class.)
1007 cl = ifd->active_[cpri];
1010 if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
1011 cl->bytes_alloc_ += cl->w_allotment_;
1012 if (!qempty(cl->q_)) {
1013 if ((cl->undertime_.tv_sec == 0) ||
1014 rmc_under_limit(cl, &now)) {
1015 if (cl->bytes_alloc_ > 0 || deficit > 1)
1018 /* underlimit but no alloc */
1021 ifd->borrowed_[ifd->qi_] = NULL;
1024 else if (first == NULL && cl->borrow_ != NULL)
1025 first = cl; /* borrowing candidate */
1028 cl->bytes_alloc_ = 0;
1030 } while (cl != ifd->active_[cpri]);
1033 /* first loop found an underlimit class with deficit */
1034 /* Loop on same priority level, with new deficit. */
1040 #ifdef ADJUST_CUTOFF
1042 * no underlimit class found. if cutoff is taking effect,
1043 * increase cutoff and try again.
1045 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1047 CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
1050 #endif /* ADJUST_CUTOFF */
1052 * If LINK_EFFICIENCY is turned on, then the first overlimit
1053 * class we encounter will send a packet if all the classes
1054 * of the link-sharing structure are overlimit.
1057 CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);
1059 if (!ifd->efficient_ || first == NULL)
1064 #if 0 /* too time-consuming for nothing */
1066 CALLOUT_STOP(&cl->callout_);
1068 cl->undertime_.tv_sec = 0;
1070 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1071 ifd->cutoff_ = cl->borrow_->depth_;
1074 * Dequeue the packet and do the bookkeeping...
1077 if (op == ALTDQ_REMOVE) {
1080 panic("_rmc_wrr_dequeue_next");
1085 * Update class statistics and link data.
1087 if (cl->bytes_alloc_ > 0)
1088 cl->bytes_alloc_ -= m_pktlen(m);
1090 if ((cl->bytes_alloc_ <= 0) || first == cl)
1091 ifd->active_[cl->pri_] = cl->peer_;
1093 ifd->active_[cl->pri_] = cl;
1095 ifd->class_[ifd->qi_] = cl;
1096 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1097 ifd->now_[ifd->qi_] = now;
1098 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1101 /* mode == ALTDQ_POLL */
1103 ifd->pollcache_ = cl;
1109 * Dequeue & return next packet from the highest priority class that
1110 * has a packet to send & has enough allocation to send it. This
1111 * routine is called by a driver whenever it needs a new packet to
1115 _rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
1119 struct rm_class *cl, *first = NULL;
1125 * if the driver polls the top of the queue and then removes
1126 * the polled packet, we must return the same packet.
1128 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1129 cl = ifd->pollcache_;
1131 ifd->pollcache_ = NULL;
1134 /* mode == ALTDQ_POLL || pollcache == NULL */
1135 ifd->pollcache_ = NULL;
1136 ifd->borrowed_[ifd->qi_] = NULL;
1138 #ifdef ADJUST_CUTOFF
1141 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1142 if (ifd->na_[cpri] == 0)
1144 cl = ifd->active_[cpri];
1147 if (!qempty(cl->q_)) {
1148 if ((cl->undertime_.tv_sec == 0) ||
1149 rmc_under_limit(cl, &now))
1151 if (first == NULL && cl->borrow_ != NULL)
1155 } while (cl != ifd->active_[cpri]);
1158 #ifdef ADJUST_CUTOFF
1160 * no underlimit class found. if cutoff is taking effect, increase
1161 * cutoff and try again.
1163 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1167 #endif /* ADJUST_CUTOFF */
1169 * If LINK_EFFICIENCY is turned on, then the first overlimit
1170 * class we encounter will send a packet if all the classes
1171 * of the link-sharing structure are overlimit.
1174 if (!ifd->efficient_ || first == NULL)
1179 #if 0 /* too time-consuming for nothing */
1181 CALLOUT_STOP(&cl->callout_);
1183 cl->undertime_.tv_sec = 0;
1185 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1186 ifd->cutoff_ = cl->borrow_->depth_;
1189 * Dequeue the packet and do the bookkeeping...
1192 if (op == ALTDQ_REMOVE) {
1195 panic("_rmc_prr_dequeue_next");
1199 ifd->active_[cpri] = cl->peer_;
1201 ifd->class_[ifd->qi_] = cl;
1202 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1203 ifd->now_[ifd->qi_] = now;
1204 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1207 /* mode == ALTDQ_POLL */
1209 ifd->pollcache_ = cl;
1216 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
1217 * is invoked by the packet driver to get the next packet to be
1218 * dequeued and output on the link. If WRR is enabled, then the
1219 * WRR dequeue next routine will determine the next packet to be sent.
1220 * Otherwise, packet-by-packet round robin is invoked.
1222 * Returns: NULL, if a packet is not available or if all
1223 * classes are overlimit.
1225 * Otherwise, Pointer to the next packet.
1229 rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
1231 if (ifd->queued_ >= ifd->maxqueued_)
1234 return (_rmc_wrr_dequeue_next(ifd, mode));
1236 return (_rmc_prr_dequeue_next(ifd, mode));
1240 * Update the utilization estimate for the packet that just completed.
1241 * The packet's class & the parent(s) of that class all get their
1242 * estimators updated. This routine is called by the driver's output-
1243 * packet-completion interrupt service routine.
1247 * a macro to approximate "divide by 1000" that gives 0.000999,
1248 * if a value has enough effective digits.
1249 * (on pentium, mul takes 9 cycles but div takes 46!)
1251 #define NSEC_TO_USEC(t) (((t) >> 10) + ((t) >> 16) + ((t) >> 17))
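/*
 * the shifts sum to 2^-10 + 2^-16 + 2^-17 ~= 0.0009995, i.e. within
 * about 0.06% of a true divide by 1000 for values with enough
 * significant bits.
 */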
1253 rmc_update_class_util(struct rm_ifdat *ifd)
1255 int idle, avgidle, pktlen;
1256 int pkt_time, tidle;
1257 rm_class_t *cl, *borrowed;
1258 rm_class_t *borrows;
1259 struct timeval *nowp;
1262 * Get the most recently completed class.
1264 if ((cl = ifd->class_[ifd->qo_]) == NULL)
1267 pktlen = ifd->curlen_[ifd->qo_];
1268 borrowed = ifd->borrowed_[ifd->qo_];
1271 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1274 * Run estimator on class and its ancestors.
1277 * rmc_update_class_util is designed to be called when the
1278 * transfer is completed, from an xmit complete interrupt,
1279 * but most drivers don't implement an upcall for that.
1280 * so, just use estimated completion time.
1281 * as a result, ifd->qi_ and ifd->qo_ are always synced.
1283 nowp = &ifd->now_[ifd->qo_];
1284 /* get pkt_time (for link) in usec */
1285 #if 1 /* use approximation */
1286 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
1287 pkt_time = NSEC_TO_USEC(pkt_time);
1289 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
1291 #if 1 /* ALTQ4PPP */
1292 if (TV_LT(nowp, &ifd->ifnow_)) {
1296 * make sure the estimated completion time does not go
1297 * too far. it can happen when the link layer supports
1298 * data compression or the interface speed is set to
1299 * a much lower value.
1301 TV_DELTA(&ifd->ifnow_, nowp, iftime);
1302 if (iftime+pkt_time < ifd->maxiftime_) {
1303 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1305 TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
1308 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1311 if (TV_LT(nowp, &ifd->ifnow_)) {
1312 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1314 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1318 while (cl != NULL) {
1319 TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
1320 if (idle >= 2000000)
1322 * this class is idle enough, reset avgidle.
1323 * (TV_DELTA returns 2000000 us when delta is large.)
1325 cl->avgidle_ = cl->maxidle_;
1327 /* get pkt_time (for class) in usec */
1328 #if 1 /* use approximation */
1329 pkt_time = pktlen * cl->ns_per_byte_;
1330 pkt_time = NSEC_TO_USEC(pkt_time);
1332 pkt_time = pktlen * cl->ns_per_byte_ / 1000;
1336 avgidle = cl->avgidle_;
1337 avgidle += idle - (avgidle >> RM_FILTER_GAIN);
1338 cl->avgidle_ = avgidle;
1340 /* Are we overlimit ? */
1342 CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
1345 * need some lower bound for avgidle, otherwise
1346 * a borrowing class gets unbounded penalty.
1348 if (avgidle < cl->minidle_)
1349 avgidle = cl->avgidle_ = cl->minidle_;
1351 /* set next idle to make avgidle 0 */
1353 (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
1354 TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
1358 (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1359 cl->undertime_.tv_sec = 0;
1360 if (cl->sleeping_) {
1361 CALLOUT_STOP(&cl->callout_);
1366 if (borrows != NULL) {
1368 ++cl->stats_.borrows;
1372 cl->last_ = ifd->ifnow_;
1373 cl->last_pkttime_ = pkt_time;
1376 if (cl->parent_ == NULL) {
1377 /* take stats of root class */
1378 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1386 * Check to see if cutoff needs to be set to a new level.
1388 cl = ifd->class_[ifd->qo_];
1389 if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
1391 if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
1392 rmc_tl_satisfied(ifd, nowp);
1393 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1395 ifd->cutoff_ = borrowed->depth_;
1396 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1399 if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
1402 rmc_tl_satisfied(ifd, &now);
1404 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1406 ifd->cutoff_ = borrowed->depth_;
1407 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1413 * Release class slot
1415 ifd->borrowed_[ifd->qo_] = NULL;
1416 ifd->class_[ifd->qo_] = NULL;
1417 ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
1423 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1424 * over-limit action routines. These get invoked by rmc_under_limit()
1425 * if a class with packets to send is over its bandwidth limit & can't
1426 * borrow from a parent class.
1432 rmc_drop_action(struct rm_class *cl)
1434 struct rm_ifdat *ifd = cl->ifdat_;
1436 ASSERT(qlen(cl->q_) > 0);
1439 ifd->na_[cl->pri_]--;
1442 void rmc_dropall(struct rm_class *cl)
1444 struct rm_ifdat *ifd = cl->ifdat_;
1446 if (!qempty(cl->q_)) {
1449 ifd->na_[cl->pri_]--;
1453 #if (__FreeBSD_version > 300000)
1454 /* hzto() is removed from FreeBSD-3.0 */
1455 static int hzto(struct timeval *);
1464 t2.tv_sec = tv->tv_sec - t2.tv_sec;
1465 t2.tv_usec = tv->tv_usec - t2.tv_usec;
1466 return (tvtohz(&t2));
1468 #endif /* __FreeBSD_version > 300000 */
1472 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1473 * delay action routine. It is invoked via rmc_under_limit when the
1474 * packet is discovered to be overlimit.
1476 * If the delay action is the result of the borrow class being overlimit, then
1477 * delay for the offtime of the borrowing class that is overlimit.
1483 rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
1485 int delay, t, extradelay;
1487 cl->stats_.overactions++;
1488 TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
1489 #ifndef BORROW_OFFTIME
1490 delay += cl->offtime_;
1493 if (!cl->sleeping_) {
1494 CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
1495 #ifdef BORROW_OFFTIME
1497 extradelay = borrow->offtime_;
1500 extradelay = cl->offtime_;
1504 * XXX recalculate suspend time:
1505 * current undertime is (tidle + pkt_time) calculated
1506 * from the last transmission.
1507 * tidle: time required to bring avgidle back to 0
1508 * pkt_time: target waiting time for this class
1509 * we need to replace pkt_time by offtime
1511 extradelay -= cl->last_pkttime_;
1513 if (extradelay > 0) {
1514 TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1515 delay += extradelay;
1519 cl->stats_.delays++;
1522 * Since packets are phased randomly with respect to the
1523 * clock, 1 tick (the next clock tick) can be an arbitrarily
1524 * short time so we have to wait for at least two ticks.
1525 * NOTE: If there's no other traffic, we need the timer as
1526 * a 'backstop' to restart this class.
1528 if (delay > tick * 2) {
1529 /* FreeBSD rounds up the tick */
1530 t = hzto(&cl->undertime_);
1533 CALLOUT_RESET(&cl->callout_, t,
1534 (timeout_t *)rmc_restart, (caddr_t)cl);
1540 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
1541 * called by the system timer code & is responsible for checking if the
1542 * class is still sleeping (it might have been restarted as a side
1543 * effect of the queue scan on a packet arrival) and, if so, restarting
1544 * output for the class. Inspecting the class state & restarting output
1545 * require locking the class structure. In general the driver is
1546 * responsible for locking but this is the only routine that is not
1547 * called directly or indirectly from the interface driver so it has
1548 * to know about system locking conventions. Under BSD, locking is done
1549 * by raising IPL to splimp so that's what's implemented here. On a
1550 * different system this would probably need to be changed.
1556 rmc_restart(struct rm_class *cl)
1558 struct rm_ifdat *ifd = cl->ifdat_;
1562 IFQ_LOCK(ifd->ifq_);
1563 if (cl->sleeping_) {
1565 cl->undertime_.tv_sec = 0;
1567 if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
1568 CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
1569 (ifd->restart)(ifd->ifq_);
1572 IFQ_UNLOCK(ifd->ifq_);
1578 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1579 * handling routine for the root class of the link sharing structure.
1585 rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
1587 panic("rmc_root_overlimit");
1591 * Packet Queue handling routines. Eventually, this is to localize the
1592 * effects on the code of whether queues are RED queues or droptail
1597 _rmc_addq(rm_class_t *cl, mbuf_t *m)
1600 if (q_is_rio(cl->q_))
1601 return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1604 if (q_is_red(cl->q_))
1605 return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1606 #endif /* ALTQ_RED */
1608 if (q_is_codel(cl->q_))
1609 return codel_addq(cl->codel_, cl->q_, m);
1612 if (cl->flags_ & RMCF_CLEARDSCP)
1613 write_dsfield(m, cl->pktattr_, 0);
1619 /* note: _rmc_dropq is not called for red */
1621 _rmc_dropq(rm_class_t *cl)
1625 if ((m = _getq(cl->q_)) != NULL)
1630 _rmc_getq(rm_class_t *cl)
1633 if (q_is_rio(cl->q_))
1634 return rio_getq((rio_t *)cl->red_, cl->q_);
1637 if (q_is_red(cl->q_))
1638 return red_getq(cl->red_, cl->q_);
1641 if (q_is_codel(cl->q_))
1642 return codel_getq(cl->codel_, cl->q_);
1644 return _getq(cl->q_);
1648 _rmc_pollq(rm_class_t *cl)
1650 return qhead(cl->q_);
1655 struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
1656 struct cbqtrace *cbqtrace_ptr = NULL;
1660 * DDB hook to trace cbq events:
1661 * the last 1024 events are held in a circular buffer.
1662 * use "call cbqtrace_dump(N)" to display 20 events from the Nth event.
1664 void cbqtrace_dump(int);
1665 static char *rmc_funcname(void *);
1667 static struct rmc_funcs {
1672 rmc_init, "rmc_init",
1673 rmc_queue_packet, "rmc_queue_packet",
1674 rmc_under_limit, "rmc_under_limit",
1675 rmc_update_class_util, "rmc_update_class_util",
1676 rmc_delay_action, "rmc_delay_action",
1677 rmc_restart, "rmc_restart",
1678 _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next",
1682 static char *rmc_funcname(void *func)
1684 struct rmc_funcs *fp;
1686 for (fp = rmc_funcs; fp->func != NULL; fp++)
1687 if (fp->func == func)
1692 void cbqtrace_dump(int counter)
1697 counter = counter % NCBQTRACE;
1698 p = (int *)&cbqtrace_buffer[counter];
1700 for (i=0; i<20; i++) {
1701 printf("[0x%x] ", *p++);
1702 printf("%s: ", rmc_funcname((void *)*p++));
1704 printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
1705 printf("%d\n",*p++);
1707 if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
1708 p = (int *)cbqtrace_buffer;
1711 #endif /* CBQ_TRACE */
1712 #endif /* ALTQ_CBQ */
1714 #if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || \
1715 defined(ALTQ_HFSC) || defined(ALTQ_PRIQ) || defined(ALTQ_CODEL)
1716 #if !defined(__GNUC__) || defined(ALTQ_DEBUG)
1719 _addq(class_queue_t *q, mbuf_t *m)
1723 if ((m0 = qtail(q)) != NULL)
1724 m->m_nextpkt = m0->m_nextpkt;
1733 _getq(class_queue_t *q)
1737 if ((m = qtail(q)) == NULL)
1739 if ((m0 = m->m_nextpkt) != m)
1740 m->m_nextpkt = m0->m_nextpkt;
1742 ASSERT(qlen(q) == 1);
1746 m0->m_nextpkt = NULL;
1750 /* drop a packet at the tail of the queue */
1752 _getq_tail(class_queue_t *q)
1754 mbuf_t *m, *m0, *prev;
1756 if ((m = m0 = qtail(q)) == NULL)
1762 prev->m_nextpkt = m->m_nextpkt;
1764 ASSERT(qlen(q) == 1);
1769 m->m_nextpkt = NULL;
1773 /* randomly select a packet in the queue */
1775 _getq_random(class_queue_t *q)
1780 if ((m = qtail(q)) == NULL)
1782 if (m->m_nextpkt == m) {
1783 ASSERT(qlen(q) == 1);
1786 struct mbuf *prev = NULL;
1788 n = arc4random() % qlen(q) + 1;
1789 for (i = 0; i < n; i++) {
1793 prev->m_nextpkt = m->m_nextpkt;
1798 m->m_nextpkt = NULL;
1803 _removeq(class_queue_t *q, mbuf_t *m)
1812 prev->m_nextpkt = m->m_nextpkt;
1815 else if (qtail(q) == m)
1821 _flushq(class_queue_t *q)
1825 while ((m = _getq(q)) != NULL)
1827 ASSERT(qlen(q) == 0);
1830 #endif /* !__GNUC__ || ALTQ_DEBUG */
1831 #endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ */