2 /* $KAME: altq_rmclass.c,v 1.19 2005/04/13 03:44:25 suz Exp $ */
5 * Copyright (c) 1991-1997 Regents of the University of California.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the Network Research
19 * Group at Lawrence Berkeley Laboratory.
20 * 4. Neither the name of the University nor of the Laboratory may be used
21 * to endorse or promote products derived from this software without
22 * specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * LBL code modified by speer@eng.sun.com, May 1997.
37 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
39 * @(#)rm_class.c 1.48 97/12/05 SMI
41 #if defined(__FreeBSD__) || defined(__NetBSD__)
45 #include "opt_inet6.h"
47 #endif /* __FreeBSD__ || __NetBSD__ */
48 #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
50 #include <sys/param.h>
51 #include <sys/malloc.h>
53 #include <sys/socket.h>
54 #include <sys/systm.h>
55 #include <sys/errno.h>
58 #include <sys/kernel.h>
62 #include <net/if_var.h>
64 #include <netinet/in.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/ip.h>
69 #include <altq/if_altq.h>
70 #include <altq/altq.h>
71 #include <altq/altq_codel.h>
72 #include <altq/altq_rmclass.h>
73 #include <altq/altq_rmclass_debug.h>
74 #include <altq/altq_red.h>
75 #include <altq/altq_rio.h>
81 #define reset_cutoff(ifd) { ifd->cutoff_ = RM_MAXDEPTH; }
87 static int rmc_satisfied(struct rm_class *, struct timeval *);
88 static void rmc_wrr_set_weights(struct rm_ifdat *);
89 static void rmc_depth_compute(struct rm_class *);
90 static void rmc_depth_recompute(rm_class_t *);
92 static mbuf_t *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
93 static mbuf_t *_rmc_prr_dequeue_next(struct rm_ifdat *, int);
95 static int _rmc_addq(rm_class_t *, mbuf_t *);
96 static void _rmc_dropq(rm_class_t *);
97 static mbuf_t *_rmc_getq(rm_class_t *);
98 static mbuf_t *_rmc_pollq(rm_class_t *);
100 static int rmc_under_limit(struct rm_class *, struct timeval *);
101 static void rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
102 static void rmc_drop_action(struct rm_class *);
103 static void rmc_restart(struct rm_class *);
104 static void rmc_root_overlimit(struct rm_class *, struct rm_class *);
106 #define BORROW_OFFTIME
108 * BORROW_OFFTIME (experimental):
109 * borrow the offtime of the class it is borrowing from.
110 * the reason is that when its own offtime is set, the class is unable
111 * to borrow much, especially when cutoff is taking effect.
112 * but when the borrowed class is overloaded (avgidle is close to minidle),
113 * use the borrowing class's offtime to avoid overload.
115 #define ADJUST_CUTOFF
117 * ADJUST_CUTOFF (experimental):
118 * if no underlimit class is found due to cutoff, increase cutoff and
119 * retry the scheduling loop.
120 * also, don't invoke delay_actions while cutoff is taking effect,
121 * since a sleeping class won't have a chance to be scheduled in the next loop.
124 * now the heuristics for setting the top-level variable (cutoff_) become:
125 * 1. if a packet arrives for a not-overlimit class, set cutoff
126 * to the depth of the class.
127 * 2. if cutoff is i, and a packet arrives for an overlimit class
128 * with an underlimit ancestor at a lower level than i (say j),
129 * then set cutoff to j.
130 * 3. at scheduling a packet, if there is no underlimit class
131 * due to the current cutoff level, increase cutoff by 1 and
132 * then try to schedule again.
137 * rmc_newclass(...) - Create a new resource management class at priority
138 * 'pri' on the interface given by 'ifd'.
140 * nsecPerByte is the data rate of the interface in nanoseconds/byte.
141 * E.g., 800 for a 10Mb/s ethernet. If the class gets less
142 * than 100% of the bandwidth, this number should be the
143 * 'effective' rate for the class. Let f be the
144 * bandwidth fraction allocated to this class, and let
145 * nsPerByte be the data rate of the output link in
146 * nanoseconds/byte. Then nsecPerByte is set to
147 * nsPerByte / f. E.g., 1600 (= 800 / .5)
148 * for a class that gets 50% of an ethernet's bandwidth.
150 * action the routine to call when the class is over limit.
152 * maxq max allowable queue size for class (in packets).
154 * parent parent class pointer.
156 * borrow class to borrow from (should be either 'parent' or null).
158 * maxidle max value allowed for class 'idle' time estimate (this
159 * parameter determines how large an initial burst of packets
159 * can be before the overlimit action is invoked.)
162 * offtime how long 'delay' action will delay when class goes over
163 * limit (this parameter determines the steady-state burst
164 * size when a class is running over its limit).
166 * Maxidle and offtime have to be computed from the following: If the
167 * average packet size is s, the bandwidth fraction allocated to this
168 * class is f, we want to allow b packet bursts, and the gain of the
169 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
171 * ptime = s * nsPerByte * (1 - f) / f
172 * maxidle = ptime * (1 - g^b) / g^b
173 * minidle = -ptime * (1 / (f - 1))
174 * offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
176 * Operationally, it's convenient to specify maxidle & offtime in units
177 * independent of the link bandwidth so the maxidle & offtime passed to
178 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
179 * (The constant factor is a scale factor needed to make the parameters
180 * integers. This scaling also means that the 'unscaled' values of
181 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
182 * not nanoseconds.) Also note that the 'idle' filter computation keeps
183 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
184 * maxidle also must be scaled upward by this value. Thus, the passed
185 * values for maxidle and offtime can be computed as follows:
187 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
188 * offtime = offtime * 8 / (1000 * nsecPerByte)
190 * When USE_HRTIME is employed, then maxidle and offtime become:
191 * maxidle = maxidle * (8.0 / nsecPerByte);
192 * offtime = offtime * (8.0 / nsecPerByte);
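 *
 * A worked example (editor's illustration; RM_FILTER_GAIN is assumed to
 * be 5, i.e. g = 31/32): for a class given half of a 10Mb/s ethernet
 * (nsPerByte = 800, f = .5, so nsecPerByte = 1600), an average packet
 * size s = 1000 bytes and a burst allowance b = 2 packets:
 *
 *	ptime   = 1000 * 800 * (1 - .5) / .5           = 800000 ns
 *	maxidle = 800000 * (1 - (31/32)^2) / (31/32)^2 ~= 52400 ns
 *	offtime = 800000 * (1 + 32 * (1/32) / (31/32)) ~= 1626000 ns
 *
 * and the scaled values passed to this routine come out to roughly
 *
 *	maxidle = 52400 * 2^5 * 8 / (1000 * 1600) ~= 8
 *	offtime = 1626000 * 8 / (1000 * 1600)     ~= 8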
195 rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
196 void (*action)(rm_class_t *, rm_class_t *), int maxq,
197 struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
198 int minidle, u_int offtime, int pktsize, int flags)
201 struct rm_class *peer;
204 if (pri >= RM_MAXPRIO)
207 if (flags & RMCF_RED) {
209 printf("rmc_newclass: RED not configured for CBQ!\n");
215 if (flags & RMCF_RIO) {
217 printf("rmc_newclass: RIO not configured for CBQ!\n");
223 if (flags & RMCF_CODEL) {
225 printf("rmc_newclass: CODEL not configured for CBQ!\n");
231 cl = malloc(sizeof(struct rm_class), M_DEVBUF, M_NOWAIT | M_ZERO);
234 CALLOUT_INIT(&cl->callout_);
235 cl->q_ = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
236 if (cl->q_ == NULL) {
242 * Class initialization.
244 cl->children_ = NULL;
245 cl->parent_ = parent;
246 cl->borrow_ = borrow;
250 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
253 cl->ns_per_byte_ = nsecPerByte;
255 qlimit(cl->q_) = maxq;
256 qtype(cl->q_) = Q_DROPHEAD;
260 #if 1 /* minidle is also scaled in ALTQ */
261 cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
262 if (cl->minidle_ > 0)
265 cl->minidle_ = minidle;
267 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
268 if (cl->maxidle_ == 0)
270 #if 1 /* offtime is also scaled in ALTQ */
271 cl->avgidle_ = cl->maxidle_;
272 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
273 if (cl->offtime_ == 0)
277 cl->offtime_ = (offtime * nsecPerByte) / 8;
279 cl->overlimit = action;
282 if (flags & (RMCF_RED|RMCF_RIO)) {
283 int red_flags, red_pkttime;
286 if (flags & RMCF_ECN)
287 red_flags |= REDF_ECN;
288 if (flags & RMCF_FLOWVALVE)
289 red_flags |= REDF_FLOWVALVE;
291 if (flags & RMCF_CLEARDSCP)
292 red_flags |= RIOF_CLEARDSCP;
294 red_pkttime = nsecPerByte * pktsize / 1000;
296 if (flags & RMCF_RED) {
297 cl->red_ = red_alloc(0, 0,
298 qlimit(cl->q_) * 10/100,
299 qlimit(cl->q_) * 30/100,
300 red_flags, red_pkttime);
301 if (cl->red_ != NULL)
302 qtype(cl->q_) = Q_RED;
306 cl->red_ = (red_t *)rio_alloc(0, NULL,
307 red_flags, red_pkttime);
308 if (cl->red_ != NULL)
309 qtype(cl->q_) = Q_RIO;
313 #endif /* ALTQ_RED */
315 if (flags & RMCF_CODEL) {
316 cl->codel_ = codel_alloc(5, 100, 0);
317 if (cl->codel_ != NULL)
318 qtype(cl->q_) = Q_CODEL;
323 * put the class into the class tree
331 if ((peer = ifd->active_[pri]) != NULL) {
332 /* find the last class at this pri */
334 while (peer->peer_ != ifd->active_[pri])
338 ifd->active_[pri] = cl;
343 cl->next_ = parent->children_;
344 parent->children_ = cl;
349 * Compute the depth of this class and its ancestors in the class
352 rmc_depth_compute(cl);
355 * If CBQ's WRR is enabled, then initialize the class WRR state.
359 ifd->alloc_[pri] += cl->allotment_;
360 rmc_wrr_set_weights(ifd);
362 IFQ_UNLOCK(ifd->ifq_);
368 rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
369 int minidle, u_int offtime, int pktsize)
371 struct rm_ifdat *ifd;
376 old_allotment = cl->allotment_;
384 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
386 cl->ns_per_byte_ = nsecPerByte;
388 qlimit(cl->q_) = maxq;
390 #if 1 /* minidle is also scaled in ALTQ */
391 cl->minidle_ = (minidle * nsecPerByte) / 8;
392 if (cl->minidle_ > 0)
395 cl->minidle_ = minidle;
397 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
398 if (cl->maxidle_ == 0)
400 #if 1 /* offtime is also scaled in ALTQ */
401 cl->avgidle_ = cl->maxidle_;
402 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
403 if (cl->offtime_ == 0)
407 cl->offtime_ = (offtime * nsecPerByte) / 8;
411 * If CBQ's WRR is enabled, then initialize the class WRR state.
414 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
415 rmc_wrr_set_weights(ifd);
417 IFQ_UNLOCK(ifd->ifq_);
424 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
425 * the appropriate round robin weights for the CBQ weighted round robin
432 rmc_wrr_set_weights(struct rm_ifdat *ifd)
435 struct rm_class *cl, *clh;
437 for (i = 0; i < RM_MAXPRIO; i++) {
439 * This is inverted from that of the simulator to
440 * maintain precision.
442 if (ifd->num_[i] == 0)
445 ifd->M_[i] = ifd->alloc_[i] /
446 (ifd->num_[i] * ifd->maxpkt_);
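/*
 * (editor's note) with M_ defined this way, each class's w_allotment_
 * computed below (allotment_ / M_) comes out to roughly maxpkt_ bytes
 * scaled by the ratio of the class's allotment to the average allotment
 * at this priority level.
 */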
448 * Compute the weighted allotment for each class.
449 * This takes the expensive div instruction out
450 * of the main loop for the wrr scheduling path.
451 * These only get recomputed when a class comes or
454 if (ifd->active_[i] != NULL) {
455 clh = cl = ifd->active_[i];
457 /* safe-guard for slow link or alloc_ == 0 */
459 cl->w_allotment_ = 0;
461 cl->w_allotment_ = cl->allotment_ /
464 } while ((cl != NULL) && (cl != clh));
470 rmc_get_weight(struct rm_ifdat *ifd, int pri)
472 if ((pri >= 0) && (pri < RM_MAXPRIO))
473 return (ifd->M_[pri]);
480 * rmc_depth_compute(struct rm_class *cl) - This function computes the
481 * appropriate depth of class 'cl' and its ancestors.
487 rmc_depth_compute(struct rm_class *cl)
489 rm_class_t *t = cl, *p;
492 * Recompute the depth for the branch of the tree.
496 if (p && (t->depth_ >= p->depth_)) {
497 p->depth_ = t->depth_ + 1;
506 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
507 * the depth of the tree after a class has been deleted.
513 rmc_depth_recompute(rm_class_t *cl)
520 if ((t = p->children_) == NULL) {
526 if (t->depth_ > cdepth)
531 if (p->depth_ == cdepth + 1)
532 /* no change to this parent */
535 p->depth_ = cdepth + 1;
543 if (cl->depth_ >= 1) {
544 if (cl->children_ == NULL) {
546 } else if ((t = cl->children_) != NULL) {
548 if (t->children_ != NULL)
549 rmc_depth_recompute(t);
553 rmc_depth_compute(cl);
560 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
561 * function deletes a class from the link-sharing structure and frees
562 * all resources associated with the class.
568 rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
570 struct rm_class *p, *head, *previous;
573 ASSERT(cl->children_ == NULL);
576 CALLOUT_STOP(&cl->callout_);
585 * Free packets in the packet queue.
586 * XXX - this may not be a desired behavior. Packets should be
592 * If the class has a parent, then remove the class from the
593 * parent's children chain.
595 if (cl->parent_ != NULL) {
596 head = cl->parent_->children_;
598 if (head->next_ == NULL) {
600 cl->parent_->children_ = NULL;
601 cl->parent_->leaf_ = 1;
602 } else while (p != NULL) {
605 cl->parent_->children_ = cl->next_;
607 previous->next_ = cl->next_;
618 * Delete class from class priority peer list.
620 if ((p = ifd->active_[cl->pri_]) != NULL) {
622 * If there is more than one member of this priority
623 * level, then look for class(cl) in the priority level.
626 while (p->peer_ != cl)
628 p->peer_ = cl->peer_;
630 if (ifd->active_[cl->pri_] == cl)
631 ifd->active_[cl->pri_] = cl->peer_;
634 ifd->active_[cl->pri_] = NULL;
639 * Recompute the WRR weights.
642 ifd->alloc_[cl->pri_] -= cl->allotment_;
643 ifd->num_[cl->pri_]--;
644 rmc_wrr_set_weights(ifd);
648 * Re-compute the depth of the tree.
651 rmc_depth_recompute(cl->parent_);
653 rmc_depth_recompute(ifd->root_);
656 IFQ_UNLOCK(ifd->ifq_);
660 * Free the class structure.
662 if (cl->red_ != NULL) {
664 if (q_is_rio(cl->q_))
665 rio_destroy((rio_t *)cl->red_);
668 if (q_is_red(cl->q_))
669 red_destroy(cl->red_);
672 if (q_is_codel(cl->q_))
673 codel_destroy(cl->codel_);
676 free(cl->q_, M_DEVBUF);
683 * rmc_init(...) - Initialize the resource management data structures
684 * associated with the output portion of interface 'ifp'. 'ifd' is
685 * where the structures will be built (for backwards compatibility, the
686 * structures aren't kept in the ifnet struct). 'nsecPerByte'
687 * gives the link speed (inverse of bandwidth) in nanoseconds/byte.
688 * 'restart' is the driver-specific routine that the generic 'delay
689 * until under limit' action will call to restart output. `maxq'
690 * is the queue size of the 'link' & 'default' classes. 'maxqueued'
691 * is the maximum number of packets that the resource management
692 * code will allow to be queued 'downstream' (this is typically 1).
698 rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
699 void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
700 int minidle, u_int offtime, int flags)
705 * Initialize the CBQ tracing/debug facility.
709 bzero((char *)ifd, sizeof (*ifd));
710 mtu = ifq->altq_ifp->if_mtu;
712 ifd->restart = restart;
713 ifd->maxqueued_ = maxqueued;
714 ifd->ns_per_byte_ = nsecPerByte;
716 ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
717 ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
719 ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
720 if (mtu * nsecPerByte > 10 * 1000000)
721 ifd->maxiftime_ /= 4;
725 CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);
728 * Initialize the CBQ's WRR state.
730 for (i = 0; i < RM_MAXPRIO; i++) {
735 ifd->active_[i] = NULL;
739 * Initialize current packet state.
743 for (i = 0; i < RM_MAXQUEUED; i++) {
744 ifd->class_[i] = NULL;
746 ifd->borrowed_[i] = NULL;
750 * Create the root class of the link-sharing structure.
752 if ((ifd->root_ = rmc_newclass(0, ifd,
754 rmc_root_overlimit, maxq, 0, 0,
755 maxidle, minidle, offtime,
757 printf("rmc_init: root class not allocated\n");
760 ifd->root_->depth_ = 0;
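/*
 * (Editor's sketch, not part of the original code) One hypothetical way a
 * driver could glue into the routines above: set up the link-sharing root
 * with rmc_init() and then add a borrowing leaf class with rmc_newclass().
 * 'mydrv_ifd', 'mydrv_restart' and the numeric parameters are illustrative
 * assumptions only, chosen to match the 10Mb/s example in the comments
 * above; the calls follow the signatures shown in this file.
 */
#ifdef notdef
static struct rm_ifdat mydrv_ifd;
static void mydrv_restart(struct ifaltq *);

static struct rm_class *
mydrv_cbq_setup(struct ifaltq *ifq)
{
	u_int nsecPerByte = 800;	/* 10Mb/s link: 800 ns/byte */

	/* root/default classes: queue limit 50, at most 1 packet downstream */
	rmc_init(ifq, &mydrv_ifd, nsecPerByte, mydrv_restart,
	    50, 1, 0 /* maxidle */, 0 /* minidle */, 0 /* offtime */,
	    RMCF_WRR);

	/* a leaf class given 50% of the link, borrowing from the root */
	return (rmc_newclass(1 /* pri */, &mydrv_ifd,
	    nsecPerByte * 2 /* 800 / .5 */, rmc_delay_action,
	    30 /* maxq */, mydrv_ifd.root_, mydrv_ifd.root_ /* borrow */,
	    8 /* maxidle */, 0 /* minidle */, 8 /* offtime */,
	    1000 /* pktsize */, 0 /* flags */));
}
#endif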
765 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
766 * mbuf 'm' to queue for resource class 'cl'. This routine is called
767 * by a driver's if_output routine. This routine must be called with
768 * output packet completion interrupts locked out (to avoid racing with rmc_dequeue_next).
771 * Returns: 0 on successful queueing
772 * -1 when packet drop occurs
775 rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
778 struct rm_ifdat *ifd = cl->ifdat_;
780 int is_empty = qempty(cl->q_);
783 if (ifd->cutoff_ > 0) {
784 if (TV_LT(&cl->undertime_, &now)) {
785 if (ifd->cutoff_ > cl->depth_)
786 ifd->cutoff_ = cl->depth_;
787 CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
792 * the class is overlimit. if the class has
793 * underlimit ancestors, set cutoff to the lowest
796 struct rm_class *borrow = cl->borrow_;
798 while (borrow != NULL &&
799 borrow->depth_ < ifd->cutoff_) {
800 if (TV_LT(&borrow->undertime_, &now)) {
801 ifd->cutoff_ = borrow->depth_;
802 CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
805 borrow = borrow->borrow_;
809 else if ((ifd->cutoff_ > 1) && cl->borrow_) {
810 if (TV_LT(&cl->borrow_->undertime_, &now)) {
811 ifd->cutoff_ = cl->borrow_->depth_;
812 CBQTRACE(rmc_queue_packet, 'ffob',
813 cl->borrow_->depth_);
819 if (_rmc_addq(cl, m) < 0)
824 CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
828 if (qlen(cl->q_) > qlimit(cl->q_)) {
829 /* note: qlimit can be set to 0 or 1 */
838 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
839 * classes to see if they are satisfied.
843 rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
848 for (i = RM_MAXPRIO - 1; i >= 0; i--) {
849 if ((bp = ifd->active_[i]) != NULL) {
852 if (!rmc_satisfied(p, now)) {
853 ifd->cutoff_ = p->depth_;
865 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
869 rmc_satisfied(struct rm_class *cl, struct timeval *now)
875 if (TV_LT(now, &cl->undertime_))
877 if (cl->depth_ == 0) {
878 if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
883 if (cl->children_ != NULL) {
886 if (!rmc_satisfied(p, now))
896 * Return 1 if class 'cl' is under limit or can borrow from a parent,
897 * 0 if overlimit. As a side-effect, this routine will invoke the
898 * class overlimit action if the class is overlimit.
902 rmc_under_limit(struct rm_class *cl, struct timeval *now)
906 struct rm_ifdat *ifd = cl->ifdat_;
908 ifd->borrowed_[ifd->qi_] = NULL;
910 * If cl is the root class, then always return that it is
911 * underlimit. Otherwise, check to see if the class is underlimit.
913 if (cl->parent_ == NULL)
917 if (TV_LT(now, &cl->undertime_))
920 CALLOUT_STOP(&cl->callout_);
922 cl->undertime_.tv_sec = 0;
927 while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
928 if (((cl = cl->borrow_) == NULL) ||
929 (cl->depth_ > ifd->cutoff_)) {
932 /* cutoff is taking effect, just
933 return false without calling
937 #ifdef BORROW_OFFTIME
939 * check if the class can borrow offtime too.
940 * borrow offtime from the top of the borrow
941 * chain if the top class is not overloaded.
944 /* cutoff is taking effect, use this class as top. */
946 CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
948 if (top != NULL && top->avgidle_ == top->minidle_)
951 (p->overlimit)(p, top);
954 (p->overlimit)(p, NULL);
962 ifd->borrowed_[ifd->qi_] = cl;
967 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
968 * Packet-by-packet round robin.
970 * The heart of the weighted round-robin scheduler, which decides which
971 * class next gets to send a packet. Highest priority first, then
972 * weighted round-robin within priorities.
974 * Each able-to-send class gets to send until its byte allocation is
975 * exhausted. Thus, the active pointer is only changed after a class has
976 * exhausted its allocation.
978 * If the scheduler finds no class that is underlimit or able to borrow,
979 * then the first class found that had a nonzero queue and is allowed to
980 * borrow gets to send.
984 _rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
986 struct rm_class *cl = NULL, *first = NULL;
995 * if the driver polls the top of the queue and then removes
996 * the polled packet, we must return the same packet.
998 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
999 cl = ifd->pollcache_;
1001 if (ifd->efficient_) {
1002 /* check if this class is overlimit */
1003 if (cl->undertime_.tv_sec != 0 &&
1004 rmc_under_limit(cl, &now) == 0)
1007 ifd->pollcache_ = NULL;
1011 /* mode == ALTDQ_POLL || pollcache == NULL */
1012 ifd->pollcache_ = NULL;
1013 ifd->borrowed_[ifd->qi_] = NULL;
1015 #ifdef ADJUST_CUTOFF
1018 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1019 if (ifd->na_[cpri] == 0)
1023 * Loop through twice for a priority level, if some class
1024 * was unable to send a packet the first round because
1025 * of the weighted round-robin mechanism.
1026 * During the second loop at this level, deficit==2.
1027 * (This second loop is not needed if for every class,
1028 * "M[cl->pri_])" times "cl->allotment" is greater than
1029 * the byte size for the largest packet in the class.)
1032 cl = ifd->active_[cpri];
1035 if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
1036 cl->bytes_alloc_ += cl->w_allotment_;
1037 if (!qempty(cl->q_)) {
1038 if ((cl->undertime_.tv_sec == 0) ||
1039 rmc_under_limit(cl, &now)) {
1040 if (cl->bytes_alloc_ > 0 || deficit > 1)
1043 /* underlimit but no alloc */
1046 ifd->borrowed_[ifd->qi_] = NULL;
1049 else if (first == NULL && cl->borrow_ != NULL)
1050 first = cl; /* borrowing candidate */
1053 cl->bytes_alloc_ = 0;
1055 } while (cl != ifd->active_[cpri]);
1058 /* first loop found an underlimit class with deficit */
1059 /* Loop on same priority level, with new deficit. */
1065 #ifdef ADJUST_CUTOFF
1067 * no underlimit class found. if cutoff is taking effect,
1068 * increase cutoff and try again.
1070 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1072 CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
1075 #endif /* ADJUST_CUTOFF */
1077 * If LINK_EFFICIENCY is turned on, then the first overlimit
1078 * class we encounter will send a packet if all the classes
1079 * of the link-sharing structure are overlimit.
1082 CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);
1084 if (!ifd->efficient_ || first == NULL)
1089 #if 0 /* too time-consuming for nothing */
1091 CALLOUT_STOP(&cl->callout_);
1093 cl->undertime_.tv_sec = 0;
1095 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1096 ifd->cutoff_ = cl->borrow_->depth_;
1099 * Dequeue the packet and do the bookkeeping...
1102 if (op == ALTDQ_REMOVE) {
1105 panic("_rmc_wrr_dequeue_next");
1110 * Update class statistics and link data.
1112 if (cl->bytes_alloc_ > 0)
1113 cl->bytes_alloc_ -= m_pktlen(m);
1115 if ((cl->bytes_alloc_ <= 0) || first == cl)
1116 ifd->active_[cl->pri_] = cl->peer_;
1118 ifd->active_[cl->pri_] = cl;
1120 ifd->class_[ifd->qi_] = cl;
1121 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1122 ifd->now_[ifd->qi_] = now;
1123 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1126 /* mode == ALTDQ_POLL */
1128 ifd->pollcache_ = cl;
1134 * Dequeue & return next packet from the highest priority class that
1135 * has a packet to send & has enough allocation to send it. This
1136 * routine is called by a driver whenever it needs a new packet to output.
1140 _rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
1144 struct rm_class *cl, *first = NULL;
1150 * if the driver polls the top of the queue and then removes
1151 * the polled packet, we must return the same packet.
1153 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1154 cl = ifd->pollcache_;
1156 ifd->pollcache_ = NULL;
1159 /* mode == ALTDQ_POLL || pollcache == NULL */
1160 ifd->pollcache_ = NULL;
1161 ifd->borrowed_[ifd->qi_] = NULL;
1163 #ifdef ADJUST_CUTOFF
1166 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1167 if (ifd->na_[cpri] == 0)
1169 cl = ifd->active_[cpri];
1172 if (!qempty(cl->q_)) {
1173 if ((cl->undertime_.tv_sec == 0) ||
1174 rmc_under_limit(cl, &now))
1176 if (first == NULL && cl->borrow_ != NULL)
1180 } while (cl != ifd->active_[cpri]);
1183 #ifdef ADJUST_CUTOFF
1185 * no underlimit class found. if cutoff is taking effect, increase
1186 * cutoff and try again.
1188 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1192 #endif /* ADJUST_CUTOFF */
1194 * If LINK_EFFICIENCY is turned on, then the first overlimit
1195 * class we encounter will send a packet if all the classes
1196 * of the link-sharing structure are overlimit.
1199 if (!ifd->efficient_ || first == NULL)
1204 #if 0 /* too time-consuming for nothing */
1206 CALLOUT_STOP(&cl->callout_);
1208 cl->undertime_.tv_sec = 0;
1210 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1211 ifd->cutoff_ = cl->borrow_->depth_;
1214 * Dequeue the packet and do the bookkeeping...
1217 if (op == ALTDQ_REMOVE) {
1220 panic("_rmc_prr_dequeue_next");
1224 ifd->active_[cpri] = cl->peer_;
1226 ifd->class_[ifd->qi_] = cl;
1227 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1228 ifd->now_[ifd->qi_] = now;
1229 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1232 /* mode == ALTDQ_POLL */
1234 ifd->pollcache_ = cl;
1241 * rmc_dequeue_next(struct rm_ifdat *ifd, int mode) - this function
1242 * is invoked by the packet driver to get the next packet to be
1243 * dequeued and output on the link. If WRR is enabled, then the
1244 * WRR dequeue next routine will determine the next packet to be sent.
1245 * Otherwise, packet-by-packet round robin is invoked.
1247 * Returns: NULL, if a packet is not available or if all
1248 * classes are overlimit.
1250 * Otherwise, a pointer to the next packet.
1254 rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
1256 if (ifd->queued_ >= ifd->maxqueued_)
1259 return (_rmc_wrr_dequeue_next(ifd, mode));
1261 return (_rmc_prr_dequeue_next(ifd, mode));
1265 * Update the utilization estimate for the packet that just completed.
1266 * The packet's class & the parent(s) of that class all get their
1267 * estimators updated. This routine is called by the driver's output-
1268 * packet-completion interrupt service routine.
1272 * a macro to approximate "divide by 1000" by multiplying by about
1273 * 0.000999, which is accurate enough when the value has enough significant bits.
1274 * (on pentium, mul takes 9 cycles but div takes 46!)
1276 #define NSEC_TO_USEC(t) (((t) >> 10) + ((t) >> 16) + ((t) >> 17))
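/*
 * (editor's note) 2^-10 + 2^-16 + 2^-17 = 0.0009765625 + 0.0000152588 +
 * 0.0000076294 ~= 0.0009994, within 0.06% of 1/1000.  E.g., for a
 * 1500-byte packet at 800 ns/byte, NSEC_TO_USEC(1200000) =
 * 1171 + 18 + 9 = 1198 usec versus the exact 1200 usec.
 */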
1278 rmc_update_class_util(struct rm_ifdat *ifd)
1280 int idle, avgidle, pktlen;
1281 int pkt_time, tidle;
1282 rm_class_t *cl, *borrowed;
1283 rm_class_t *borrows;
1284 struct timeval *nowp;
1287 * Get the most recent completed class.
1289 if ((cl = ifd->class_[ifd->qo_]) == NULL)
1292 pktlen = ifd->curlen_[ifd->qo_];
1293 borrowed = ifd->borrowed_[ifd->qo_];
1296 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1299 * Run estimator on class and its ancestors.
1302 * rmc_update_class_util is designed to be called when the
1303 * transfer is completed from an xmit complete interrupt,
1304 * but most drivers don't implement an upcall for that.
1305 * so, just use estimated completion time.
1306 * as a result, ifd->qi_ and ifd->qo_ are always synced.
1308 nowp = &ifd->now_[ifd->qo_];
1309 /* get pkt_time (for link) in usec */
1310 #if 1 /* use approximation */
1311 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
1312 pkt_time = NSEC_TO_USEC(pkt_time);
1314 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
1316 #if 1 /* ALTQ4PPP */
1317 if (TV_LT(nowp, &ifd->ifnow_)) {
1321 * make sure the estimated completion time does not go
1322 * too far. it can happen when the link layer supports
1323 * data compression or the interface speed is set to
1324 * a much lower value.
1326 TV_DELTA(&ifd->ifnow_, nowp, iftime);
1327 if (iftime+pkt_time < ifd->maxiftime_) {
1328 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1330 TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
1333 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1336 if (TV_LT(nowp, &ifd->ifnow_)) {
1337 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1339 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1343 while (cl != NULL) {
1344 TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
1345 if (idle >= 2000000)
1347 * this class is idle enough, reset avgidle.
1348 * (TV_DELTA returns 2000000 us when delta is large.)
1350 cl->avgidle_ = cl->maxidle_;
1352 /* get pkt_time (for class) in usec */
1353 #if 1 /* use approximation */
1354 pkt_time = pktlen * cl->ns_per_byte_;
1355 pkt_time = NSEC_TO_USEC(pkt_time);
1357 pkt_time = pktlen * cl->ns_per_byte_ / 1000;
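/*
 * (Editor's note) The three lines below implement the idle-time filter
 * avgidle = (1 - 2^-RM_FILTER_GAIN) * avgidle + idle.  Because avgidle_
 * is kept scaled up by 2^RM_FILTER_GAIN (see the comment above
 * rmc_newclass), this is the usual EWMA
 * avg = (1 - 2^-RM_FILTER_GAIN) * avg + 2^-RM_FILTER_GAIN * idle
 * applied to the unscaled estimate.
 */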
1361 avgidle = cl->avgidle_;
1362 avgidle += idle - (avgidle >> RM_FILTER_GAIN);
1363 cl->avgidle_ = avgidle;
1365 /* Are we overlimit ? */
1367 CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
1370 * need some lower bound for avgidle, otherwise
1371 * a borrowing class gets unbounded penalty.
1373 if (avgidle < cl->minidle_)
1374 avgidle = cl->avgidle_ = cl->minidle_;
1376 /* set next idle to make avgidle 0 */
1378 (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
1379 TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
1383 (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1384 cl->undertime_.tv_sec = 0;
1385 if (cl->sleeping_) {
1386 CALLOUT_STOP(&cl->callout_);
1391 if (borrows != NULL) {
1393 ++cl->stats_.borrows;
1397 cl->last_ = ifd->ifnow_;
1398 cl->last_pkttime_ = pkt_time;
1401 if (cl->parent_ == NULL) {
1402 /* take stats of root class */
1403 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1411 * Check to see if cutoff needs to be set to a new level.
1413 cl = ifd->class_[ifd->qo_];
1414 if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
1416 if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
1417 rmc_tl_satisfied(ifd, nowp);
1418 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1420 ifd->cutoff_ = borrowed->depth_;
1421 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1424 if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
1427 rmc_tl_satisfied(ifd, &now);
1429 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1431 ifd->cutoff_ = borrowed->depth_;
1432 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1438 * Release class slot
1440 ifd->borrowed_[ifd->qo_] = NULL;
1441 ifd->class_[ifd->qo_] = NULL;
1442 ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
1448 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1449 * over-limit action routines. These get invoked by rmc_under_limit()
1450 * if a class with packets to send is over its bandwidth limit & can't
1451 * borrow from a parent class.
1457 rmc_drop_action(struct rm_class *cl)
1459 struct rm_ifdat *ifd = cl->ifdat_;
1461 ASSERT(qlen(cl->q_) > 0);
1464 ifd->na_[cl->pri_]--;
1467 void rmc_dropall(struct rm_class *cl)
1469 struct rm_ifdat *ifd = cl->ifdat_;
1471 if (!qempty(cl->q_)) {
1474 ifd->na_[cl->pri_]--;
1478 #if (__FreeBSD_version > 300000)
1479 /* hzto() was removed in FreeBSD-3.0 */
1480 static int hzto(struct timeval *);
1489 t2.tv_sec = tv->tv_sec - t2.tv_sec;
1490 t2.tv_usec = tv->tv_usec - t2.tv_usec;
1491 return (tvtohz(&t2));
1493 #endif /* __FreeBSD_version > 300000 */
1497 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1498 * delay action routine. It is invoked via rmc_under_limit when the
1499 * packet is discovered to be overlimit.
1501 * If the delay action is the result of the borrow class being overlimit, then
1502 * delay for the offtime of the borrowing class that is overlimit.
1508 rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
1510 int delay, t, extradelay;
1512 cl->stats_.overactions++;
1513 TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
1514 #ifndef BORROW_OFFTIME
1515 delay += cl->offtime_;
1518 if (!cl->sleeping_) {
1519 CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
1520 #ifdef BORROW_OFFTIME
1522 extradelay = borrow->offtime_;
1525 extradelay = cl->offtime_;
1529 * XXX recalculate suspend time:
1530 * current undertime is (tidle + pkt_time) calculated
1531 * from the last transmission.
1532 * tidle: time required to bring avgidle back to 0
1533 * pkt_time: target waiting time for this class
1534 * we need to replace pkt_time by offtime
1536 extradelay -= cl->last_pkttime_;
1538 if (extradelay > 0) {
1539 TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1540 delay += extradelay;
1544 cl->stats_.delays++;
1547 * Since packets are phased randomly with respect to the
1548 * clock, 1 tick (the next clock tick) can be an arbitrarily
1549 * short time so we have to wait for at least two ticks.
1550 * NOTE: If there's no other traffic, we need the timer as
1551 * a 'backstop' to restart this class.
1553 if (delay > tick * 2) {
1555 /* FreeBSD rounds up the tick */
1556 t = hzto(&cl->undertime_);
1558 /* other BSDs round down the tick */
1559 t = hzto(&cl->undertime_) + 1;
1563 CALLOUT_RESET(&cl->callout_, t,
1564 (timeout_t *)rmc_restart, (caddr_t)cl);
1570 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
1571 * called by the system timer code & is responsible for checking if the
1572 * class is still sleeping (it might have been restarted as a side
1573 * effect of the queue scan on a packet arrival) and, if so, restarting
1574 * output for the class. Inspecting the class state & restarting output
1575 * require locking the class structure. In general the driver is
1576 * responsible for locking but this is the only routine that is not
1577 * called directly or indirectly from the interface driver so it has
1578 * to know about system locking conventions. Under bsd, locking is done
1579 * by raising IPL to splimp so that's what's implemented here. On a
1580 * different system this would probably need to be changed.
1586 rmc_restart(struct rm_class *cl)
1588 struct rm_ifdat *ifd = cl->ifdat_;
1596 IFQ_LOCK(ifd->ifq_);
1597 if (cl->sleeping_) {
1599 cl->undertime_.tv_sec = 0;
1601 if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
1602 CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
1603 (ifd->restart)(ifd->ifq_);
1606 IFQ_UNLOCK(ifd->ifq_);
1612 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1613 * handling routine for the root class of the link sharing structure.
1619 rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
1621 panic("rmc_root_overlimit");
1625 * Packet Queue handling routines. Eventually, this is to localize the
1626 * effects on the code of whether queues are red queues or droptail queues.
1631 _rmc_addq(rm_class_t *cl, mbuf_t *m)
1634 if (q_is_rio(cl->q_))
1635 return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1638 if (q_is_red(cl->q_))
1639 return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1640 #endif /* ALTQ_RED */
1642 if (q_is_codel(cl->q_))
1643 return codel_addq(cl->codel_, cl->q_, m);
1646 if (cl->flags_ & RMCF_CLEARDSCP)
1647 write_dsfield(m, cl->pktattr_, 0);
1653 /* note: _rmc_dropq is not called for red */
1655 _rmc_dropq(rm_class_t *cl)
1659 if ((m = _getq(cl->q_)) != NULL)
1664 _rmc_getq(rm_class_t *cl)
1667 if (q_is_rio(cl->q_))
1668 return rio_getq((rio_t *)cl->red_, cl->q_);
1671 if (q_is_red(cl->q_))
1672 return red_getq(cl->red_, cl->q_);
1675 if (q_is_codel(cl->q_))
1676 return codel_getq(cl->codel_, cl->q_);
1678 return _getq(cl->q_);
1682 _rmc_pollq(rm_class_t *cl)
1684 return qhead(cl->q_);
1689 struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
1690 struct cbqtrace *cbqtrace_ptr = NULL;
1694 * DDB hook to trace cbq events:
1695 * the last 1024 events are held in a circular buffer.
1696 * use "call cbqtrace_dump(N)" to display 20 events starting from the Nth event.
1698 void cbqtrace_dump(int);
1699 static char *rmc_funcname(void *);
1701 static struct rmc_funcs {
1706 rmc_init, "rmc_init",
1707 rmc_queue_packet, "rmc_queue_packet",
1708 rmc_under_limit, "rmc_under_limit",
1709 rmc_update_class_util, "rmc_update_class_util",
1710 rmc_delay_action, "rmc_delay_action",
1711 rmc_restart, "rmc_restart",
1712 _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next",
1716 static char *rmc_funcname(void *func)
1718 struct rmc_funcs *fp;
1720 for (fp = rmc_funcs; fp->func != NULL; fp++)
1721 if (fp->func == func)
1726 void cbqtrace_dump(int counter)
1731 counter = counter % NCBQTRACE;
1732 p = (int *)&cbqtrace_buffer[counter];
1734 for (i=0; i<20; i++) {
1735 printf("[0x%x] ", *p++);
1736 printf("%s: ", rmc_funcname((void *)*p++));
1738 printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
1739 printf("%d\n",*p++);
1741 if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
1742 p = (int *)cbqtrace_buffer;
1745 #endif /* CBQ_TRACE */
1746 #endif /* ALTQ_CBQ */
1748 #if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || \
1749 defined(ALTQ_HFSC) || defined(ALTQ_PRIQ) || defined(ALTQ_CODEL)
1750 #if !defined(__GNUC__) || defined(ALTQ_DEBUG)
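/*
 * (Editor's note) Each class_queue_t is kept as a circular, singly-linked
 * list of mbufs chained through m_nextpkt: qtail(q) points at the last
 * packet and qtail(q)->m_nextpkt is the head, so enqueue at the tail and
 * dequeue at the head are both O(1).
 */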
1753 _addq(class_queue_t *q, mbuf_t *m)
1757 if ((m0 = qtail(q)) != NULL)
1758 m->m_nextpkt = m0->m_nextpkt;
1767 _getq(class_queue_t *q)
1771 if ((m = qtail(q)) == NULL)
1773 if ((m0 = m->m_nextpkt) != m)
1774 m->m_nextpkt = m0->m_nextpkt;
1776 ASSERT(qlen(q) == 1);
1780 m0->m_nextpkt = NULL;
1784 /* drop a packet at the tail of the queue */
1786 _getq_tail(class_queue_t *q)
1788 mbuf_t *m, *m0, *prev;
1790 if ((m = m0 = qtail(q)) == NULL)
1796 prev->m_nextpkt = m->m_nextpkt;
1798 ASSERT(qlen(q) == 1);
1803 m->m_nextpkt = NULL;
1807 /* randomly select a packet in the queue */
1809 _getq_random(class_queue_t *q)
1814 if ((m = qtail(q)) == NULL)
1816 if (m->m_nextpkt == m) {
1817 ASSERT(qlen(q) == 1);
1820 struct mbuf *prev = NULL;
1822 n = arc4random() % qlen(q) + 1;
1823 for (i = 0; i < n; i++) {
1827 prev->m_nextpkt = m->m_nextpkt;
1832 m->m_nextpkt = NULL;
1837 _removeq(class_queue_t *q, mbuf_t *m)
1846 prev->m_nextpkt = m->m_nextpkt;
1849 else if (qtail(q) == m)
1855 _flushq(class_queue_t *q)
1859 while ((m = _getq(q)) != NULL)
1861 ASSERT(qlen(q) == 0);
1864 #endif /* !__GNUC__ || ALTQ_DEBUG */
1865 #endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ */