2 * Copyright (c) 2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/net/altq/altq_fairq.c,v 1.1 2008/04/06 18:58:15 dillon Exp $
38 * Matt: I gutted altq_priq.c and used it as a skeleton on which to build
39 * fairq. The fairq algorithm is completely different than priq, of course,
40 * but because I used priq's skeleton I believe I should include priq's
43 * Copyright (C) 2000-2003
44 * Sony Computer Science Laboratories Inc. All rights reserved.
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
49 * 1. Redistributions of source code must retain the above copyright
50 * notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 * notice, this list of conditions and the following disclaimer in the
53 * documentation and/or other materials provided with the distribution.
55 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * FAIRQ - take traffic classified by keep state (hashed into
70 * mbuf->m_pkthdr.altq_state_hash) and bucketize it. Fairly extract
71 * the first packet from each bucket in a round-robin fashion.
73 * TODO - better overall qlimit support (right now it is per-bucket).
74 * - NOTE: red etc is per bucket, not overall.
75 * - better service curve support.
79 * altq on em0 fairq bandwidth 650Kb queue { std, bulk }
80 * queue std priority 3 bandwidth 400Kb \
81 * fairq (buckets 64, default, hogs 1Kb) qlimit 50
82 * queue bulk priority 2 bandwidth 100Kb \
83 * fairq (buckets 64, hogs 1Kb) qlimit 50
85 * pass out on em0 from any to any keep state queue std
86 * pass out on em0 inet proto tcp ..... port ... keep state queue bulk
90 #include "opt_inet6.h"
92 #ifdef ALTQ_FAIRQ /* fairq is enabled in the kernel conf */
94 #include <sys/param.h>
95 #include <sys/malloc.h>
97 #include <sys/socket.h>
98 #include <sys/sockio.h>
99 #include <sys/systm.h>
100 #include <sys/proc.h>
101 #include <sys/errno.h>
102 #include <sys/kernel.h>
103 #include <sys/queue.h>
106 #include <net/if_var.h>
107 #include <netinet/in.h>
109 #include <netpfil/pf/pf.h>
110 #include <netpfil/pf/pf_altq.h>
111 #include <netpfil/pf/pf_mtag.h>
112 #include <net/altq/altq.h>
113 #include <net/altq/altq_fairq.h>
/*
 * Forward declarations for the FAIRQ discipline's internal interface.
 * The first group is discipline/class management, the second group is the
 * per-class queue manipulation (add/get/poll/select/purge), and the last
 * two are stats export and qid-handle lookup.
 */
116 * function prototypes
118 static int fairq_clear_interface(struct fairq_if *);
119 static int fairq_request(struct ifaltq *, int, void *);
120 static void fairq_purge(struct fairq_if *);
121 static struct fairq_class *fairq_class_create(struct fairq_if *, int, int, u_int, struct fairq_opts *, int);
122 static int fairq_class_destroy(struct fairq_class *);
123 static int fairq_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
124 static struct mbuf *fairq_dequeue(struct ifaltq *, int);
126 static int fairq_addq(struct fairq_class *, struct mbuf *, u_int32_t);
127 static struct mbuf *fairq_getq(struct fairq_class *, uint64_t);
128 static struct mbuf *fairq_pollq(struct fairq_class *, uint64_t, int *);
129 static fairq_bucket_t *fairq_selectq(struct fairq_class *, int);
130 static void fairq_purgeq(struct fairq_class *);
132 static void get_class_stats(struct fairq_classstats *, struct fairq_class *);
133 static struct fairq_class *clh_to_clp(struct fairq_if *, uint32_t);
/*
 * Attach the FAIRQ discipline state (a->altq_disc) to the interface's send
 * queue, registering the enqueue/dequeue/request entry points with ALTQ.
 * Fails if the named interface does not exist or no discipline state was set
 * up beforehand.
 *
 * NOTE(review): this chunk appears to have lines elided by extraction
 * (return type, braces, error return); original tokens kept verbatim.
 */
136 fairq_pfattach(struct pf_altq *a)
141 	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
144 	error = altq_attach(&ifp->if_snd, ALTQT_FAIRQ, a->altq_disc,
145 	    fairq_enqueue, fairq_dequeue, fairq_request, NULL, NULL);
/*
 * Allocate and initialize a fairq_if for an interface being configured for
 * FAIRQ.  The interface bandwidth and send-queue pointer are recorded;
 * pif_maxpri starts at -1 meaning "no classes configured yet".
 * M_WAITOK | M_ZERO: allocation sleeps if needed and returns zeroed memory,
 * so all unassigned fields start at 0/NULL.
 */
151 fairq_add_altq(struct ifnet *ifp, struct pf_altq *a)
153 	struct fairq_if *pif;
157 	if (!ALTQ_IS_READY(&ifp->if_snd))
161 	pif = malloc(sizeof(struct fairq_if),
162 	    M_DEVBUF, M_WAITOK | M_ZERO);
163 	pif->pif_bandwidth = a->ifbandwidth;
164 	pif->pif_maxpri = -1;
165 	pif->pif_ifq = &ifp->if_snd;
167 	/* keep the state in pf_altq */
/*
 * Tear down the FAIRQ discipline state referenced by a->altq_disc.
 * A NULL discipline pointer means nothing is attached (error return is on
 * an elided line).  fairq_clear_interface() destroys all classes first.
 */
174 fairq_remove_altq(struct pf_altq *a)
176 	struct fairq_if *pif;
178 	if ((pif = a->altq_disc) == NULL)
182 	fairq_clear_interface(pif);
/*
 * Create one FAIRQ class (queue) from a pf_altq specification.
 * Validation before creation: the priority must be in range, the priority
 * slot must be free (one class per priority), and the qid must not already
 * be in use by another class.
 */
189 fairq_add_queue(struct pf_altq *a)
191 	struct fairq_if *pif;
192 	struct fairq_class *cl;
194 	if ((pif = a->altq_disc) == NULL)
197 	/* check parameters */
198 	if (a->priority >= FAIRQ_MAXPRI)
202 	if (pif->pif_classes[a->priority] != NULL)
204 	if (clh_to_clp(pif, a->qid) != NULL)
207 	cl = fairq_class_create(pif, a->priority, a->qlimit, a->bandwidth,
208 	    &a->pq_u.fairq_opts, a->qid);
/*
 * Destroy the class identified by a->qid.  Fails (on elided error-return
 * lines) when no discipline is attached or the qid does not resolve to a
 * class.
 */
216 fairq_remove_queue(struct pf_altq *a)
218 	struct fairq_if *pif;
219 	struct fairq_class *cl;
221 	if ((pif = a->altq_disc) == NULL)
224 	if ((cl = clh_to_clp(pif, a->qid)) == NULL)
227 	return (fairq_class_destroy(cl));
/*
 * Export per-class statistics to userland.  The class is located by
 * interface name + qid; the stats are gathered into a local struct and
 * copied out, and *nbytes is updated to the size actually written.
 * The *nbytes pre-check rejects undersized user buffers before copyout.
 */
231 fairq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes, int version)
233 	struct fairq_if *pif;
234 	struct fairq_class *cl;
235 	struct fairq_classstats stats;
238 	if ((pif = altq_lookup(a->ifname, ALTQT_FAIRQ)) == NULL)
241 	if ((cl = clh_to_clp(pif, a->qid)) == NULL)
244 	if (*nbytes < sizeof(stats))
247 	get_class_stats(&stats, cl);
249 	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
251 	*nbytes = sizeof(stats);
/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes.
 *
 * Walks every priority slot up to pif_maxpri and destroys any class found;
 * fairq_class_destroy() is responsible for adjusting pif_maxpri as classes
 * disappear.
 */
256 * bring the interface back to the initial state by discarding
257 * all the filters and classes.
260 fairq_clear_interface(struct fairq_if *pif)
262 	struct fairq_class *cl;
265 	/* clear out the classes */
266 	for (pri = 0; pri <= pif->pif_maxpri; pri++) {
267 		if ((cl = pif->pif_classes[pri]) != NULL)
268 			fairq_class_destroy(cl);
/*
 * ALTQ request hook (registered via altq_attach).  Must be entered with the
 * ifq lock held; the request dispatch (e.g. purge handling) is on elided
 * lines.
 */
275 fairq_request(struct ifaltq *ifq, int req, void *arg)
277 	struct fairq_if *pif = (struct fairq_if *)ifq->altq_disc;
279 	IFQ_LOCK_ASSERT(ifq);
/* discard all the queued packets on the interface */
/*
 * Iterates every configured class and purges those that have an active
 * bucket list (cl_head non-NULL).  The interface queue length counter is
 * zeroed afterwards, but only while ALTQ is actually enabled on the ifq.
 */
291 fairq_purge(struct fairq_if *pif)
293 	struct fairq_class *cl;
296 	for (pri = 0; pri <= pif->pif_maxpri; pri++) {
297 		if ((cl = pif->pif_classes[pri]) != NULL && cl->cl_head)
300 	if (ALTQ_IS_ENABLED(pif->pif_ifq))
301 		pif->pif_ifq->ifq_len = 0;
/*
 * Create (or re-create) the class at priority 'pri'.
 *
 * Option handling: RED and CODEL flags are rejected when the corresponding
 * AQM support is not compiled in.  The bucket count is clamped to
 * FAIRQ_MAX_BUCKETS and forced to a power of two so that a cheap
 * (hash & cl_nbucket_mask) maps packets to buckets.
 *
 * If a class already exists at this priority it is modified in place:
 * the old AQM state (RIO/RED/CODEL) is destroyed under the ifq lock and
 * re-created below from the new options.
 *
 * Unit conventions: bandwidth, hogs_m1 and lssc_m1 arrive in bits/sec and
 * are stored divided by 8, i.e. bytes/sec, to match the byte-based
 * bandwidth accounting in fairq_getq()/fairq_pollq().
 *
 * NOTE(review): extraction has elided lines here (e.g. the body of the
 * power-of-two rounding loop and several closing braces); tokens below are
 * kept verbatim.
 */
304 static struct fairq_class *
305 fairq_class_create(struct fairq_if *pif, int pri, int qlimit,
306     u_int bandwidth, struct fairq_opts *opts, int qid)
308 	struct fairq_class *cl;
309 	int flags = opts->flags;
310 	u_int nbuckets = opts->nbuckets;
314 	if (flags & FARF_RED) {
316 		printf("fairq_class_create: RED not configured for FAIRQ!\n");
322 	if (flags & FARF_CODEL) {
324 		printf("fairq_class_create: CODEL not configured for FAIRQ!\n");
331 	if (nbuckets > FAIRQ_MAX_BUCKETS)
332 		nbuckets = FAIRQ_MAX_BUCKETS;
333 	/* enforce power-of-2 size */
	/* condition is false exactly when nbuckets is a power of two */
334 	while ((nbuckets ^ (nbuckets - 1)) != ((nbuckets << 1) - 1))
337 	if ((cl = pif->pif_classes[pri]) != NULL) {
338 		/* modify the class instead of creating a new one */
339 		IFQ_LOCK(cl->cl_pif->pif_ifq);
342 		IFQ_UNLOCK(cl->cl_pif->pif_ifq);
344 		if (cl->cl_qtype == Q_RIO)
345 			rio_destroy((rio_t *)cl->cl_red);
348 		if (cl->cl_qtype == Q_RED)
349 			red_destroy(cl->cl_red);
352 		if (cl->cl_qtype == Q_CODEL)
353 			codel_destroy(cl->cl_codel);
356 	cl = malloc(sizeof(struct fairq_class),
357 	    M_DEVBUF, M_WAITOK | M_ZERO);
358 	cl->cl_nbuckets = nbuckets;
	/* power-of-two nbuckets makes this a valid hash mask */
359 	cl->cl_nbucket_mask = nbuckets - 1;
361 	cl->cl_buckets = malloc(
362 	    sizeof(struct fairq_bucket) * cl->cl_nbuckets,
363 	    M_DEVBUF, M_WAITOK | M_ZERO);
367 	pif->pif_classes[pri] = cl;
368 	if (flags & FARF_DEFAULTCLASS)
369 		pif->pif_default = cl;
371 		qlimit = 50;	/* use default */
372 	cl->cl_qlimit = qlimit;
	/* qlimit applies per bucket, not to the class as a whole */
373 	for (i = 0; i < cl->cl_nbuckets; ++i) {
374 		qlimit(&cl->cl_buckets[i].queue) = qlimit;
	/* convert bits/sec -> bytes/sec */
376 	cl->cl_bandwidth = bandwidth / 8;
377 	cl->cl_qtype = Q_DROPTAIL;
378 	cl->cl_flags = flags & FARF_USERFLAGS;
380 	if (pri > pif->pif_maxpri)
381 		pif->pif_maxpri = pri;
384 	cl->cl_hogs_m1 = opts->hogs_m1 / 8;
385 	cl->cl_lssc_m1 = opts->lssc_m1 / 8;	/* NOT YET USED */
388 	if (flags & (FARF_RED|FARF_RIO)) {
389 		int red_flags, red_pkttime;
392 		if (flags & FARF_ECN)
393 			red_flags |= REDF_ECN;
395 		if (flags & FARF_CLEARDSCP)
396 			red_flags |= RIOF_CLEARDSCP;
	/* pkttime: nanoseconds per MTU-sized packet at the iface bandwidth */
398 		if (pif->pif_bandwidth < 8)
399 			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
401 			red_pkttime = (int64_t)pif->pif_ifq->altq_ifp->if_mtu
402 			    * 1000 * 1000 * 1000 / (pif->pif_bandwidth / 8);
404 		if (flags & FARF_RIO) {
405 			cl->cl_red = (red_t *)rio_alloc(0, NULL,
406 			    red_flags, red_pkttime);
407 			if (cl->cl_red != NULL)
408 				cl->cl_qtype = Q_RIO;
	/* RED thresholds: 10% / 30% of the per-bucket qlimit */
411 		if (flags & FARF_RED) {
412 			cl->cl_red = red_alloc(0, 0,
413 			    cl->cl_qlimit * 10/100,
414 			    cl->cl_qlimit * 30/100,
415 			    red_flags, red_pkttime);
416 			if (cl->cl_red != NULL)
417 				cl->cl_qtype = Q_RED;
420 #endif /* ALTQ_RED */
422 	if (flags & FARF_CODEL) {
423 		cl->cl_codel = codel_alloc(5, 100, 0);
424 		if (cl->cl_codel != NULL)
425 			cl->cl_qtype = Q_CODEL;
/*
 * Destroy a class: unlink it from its fairq_if under the ifq lock, then
 * free its AQM state and bucket array.
 *
 * pif_maxpri maintenance: if the destroyed class held the highest priority,
 * scan downward for the next occupied slot; if none is found pif_maxpri
 * falls back to -1 (the "no classes" sentinel, matching fairq_add_altq).
 * The poll cache is invalidated so fairq_dequeue() cannot hand back a
 * pointer into freed memory.
 */
433 fairq_class_destroy(struct fairq_class *cl)
435 	struct fairq_if *pif;
438 	IFQ_LOCK(cl->cl_pif->pif_ifq);
444 	pif->pif_classes[cl->cl_pri] = NULL;
445 	if (pif->pif_poll_cache == cl)
446 		pif->pif_poll_cache = NULL;
447 	if (pif->pif_maxpri == cl->cl_pri) {
448 		for (pri = cl->cl_pri; pri >= 0; pri--)
449 			if (pif->pif_classes[pri] != NULL) {
450 				pif->pif_maxpri = pri;
454 			pif->pif_maxpri = -1;
456 	IFQ_UNLOCK(cl->cl_pif->pif_ifq);
458 	if (cl->cl_red != NULL) {
460 		if (cl->cl_qtype == Q_RIO)
461 			rio_destroy((rio_t *)cl->cl_red);
464 		if (cl->cl_qtype == Q_RED)
465 			red_destroy(cl->cl_red);
468 		if (cl->cl_qtype == Q_CODEL)
469 			codel_destroy(cl->cl_codel);
472 	free(cl->cl_buckets, M_DEVBUF);
/*
 * fairq_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 *
 * Classification comes from the pf mbuf tag: t->qid selects the class and
 * t->qid_hash selects the bucket within it.  Untagged (or unresolvable)
 * packets fall through to the default class.  On a drop inside
 * fairq_addq() the mbuf has already been freed; only the drop counter is
 * updated here.
 */
483 fairq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
485 	struct fairq_if *pif = (struct fairq_if *)ifq->altq_disc;
486 	struct fairq_class *cl = NULL; /* Make compiler happy */
488 	u_int32_t qid_hash = 0;
491 	IFQ_LOCK_ASSERT(ifq);
493 	/* grab class set by classifier */
494 	if ((m->m_flags & M_PKTHDR) == 0) {
495 		/* should not happen */
496 		printf("altq: packet for %s does not have pkthdr\n",
497 		    ifq->altq_ifp->if_xname);
502 	if ((t = pf_find_mtag(m)) != NULL) {
503 		cl = clh_to_clp(pif, t->qid);
504 		qid_hash = t->qid_hash;
507 		cl = pif->pif_default;
	/* flag lets fairq_dequeue() skip provably-empty classes cheaply */
513 	cl->cl_flags |= FARF_HAS_PACKETS;
514 	cl->cl_pktattr = NULL;
516 	if (fairq_addq(cl, m, qid_hash) != 0) {
517 		/* drop occurred.  mbuf was freed in fairq_addq. */
518 		PKTCNTR_ADD(&cl->cl_dropcnt, len);
/*
 * fairq_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 * from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 * ALTDQ_REMOVE must return the same packet if called immediately
 * (after a poll -- pif_poll_cache implements that contract: a POLL caches
 * the chosen class and the next REMOVE consumes the cache instead of
 * re-running selection).
 *
 * Selection scans priorities high to low; an over-limit class is only used
 * as a fallback when no lower-priority class is under its bandwidth limit
 * (the hit_limit == 0 || best_cl == NULL test).
 */
536 fairq_dequeue(struct ifaltq *ifq, int op)
538 	struct fairq_if *pif = (struct fairq_if *)ifq->altq_disc;
539 	struct fairq_class *cl;
540 	struct fairq_class *best_cl;
542 	struct mbuf *m = NULL;
543 	uint64_t cur_time = read_machclk();
547 	IFQ_LOCK_ASSERT(ifq);
549 	if (IFQ_IS_EMPTY(ifq)) {
	/* fast path: REMOVE immediately after POLL reuses the cached class */
553 	if (pif->pif_poll_cache && op == ALTDQ_REMOVE) {
554 		best_cl = pif->pif_poll_cache;
555 		m = fairq_getq(best_cl, cur_time);
556 		pif->pif_poll_cache = NULL;
559 			PKTCNTR_ADD(&best_cl->cl_xmitcnt, m_pktlen(m));
566 		for (pri = pif->pif_maxpri; pri >= 0; pri--) {
567 			if ((cl = pif->pif_classes[pri]) == NULL)
569 			if ((cl->cl_flags & FARF_HAS_PACKETS) == 0)
571 			m = fairq_pollq(cl, cur_time, &hit_limit);
	/* poll found nothing: clear the hint set by fairq_enqueue() */
573 				cl->cl_flags &= ~FARF_HAS_PACKETS;
578 			 * Only override the best choice if we are under
581 			if (hit_limit == 0 || best_cl == NULL) {
587 			 * Remember the highest priority mbuf in case we
588 			 * do not find any lower priority mbufs.
594 	if (op == ALTDQ_POLL) {
595 		pif->pif_poll_cache = best_cl;
597 	} else if (best_cl) {
598 		m = fairq_getq(best_cl, cur_time);
601 			PKTCNTR_ADD(&best_cl->cl_xmitcnt, m_pktlen(m));
/*
 * Enqueue one mbuf into the class, choosing a bucket from the packet's
 * keep-state hash (bucketid & cl_nbucket_mask).  Returns non-zero and frees
 * the mbuf on drop (droptail limit, or per the active AQM's addq).
 */
610 fairq_addq(struct fairq_class *cl, struct mbuf *m, u_int32_t bucketid)
617 	 * If the packet doesn't have any keep state put it on the end of
618 	 * our queue.  XXX this can result in out of order delivery.
622 		b = cl->cl_head->prev;
624 		b = &cl->cl_buckets[0];
626 		hindex = bucketid & cl->cl_nbucket_mask;
627 		b = &cl->cl_buckets[hindex];
631 	 * Add the bucket to the end of the circular list of active buckets.
633 	 * As a special case we add the bucket to the beginning of the list
634 	 * instead of the end if it was not previously on the list and if
635 	 * its traffic is less than the hog level.
637 	if (b->in_use == 0) {
639 		if (cl->cl_head == NULL) {
644 			b->next = cl->cl_head;
645 			b->prev = cl->cl_head->prev;
	/* non-hog flows jump the queue: head insertion favors light traffic */
649 			if (b->bw_delta && cl->cl_hogs_m1) {
650 				bw = b->bw_bytes * machclk_freq / b->bw_delta;
651 				if (bw < cl->cl_hogs_m1)
	/* delegate to the configured AQM, if any */
658 	if (cl->cl_qtype == Q_RIO)
659 		return rio_addq((rio_t *)cl->cl_red, &b->queue, m, cl->cl_pktattr);
662 	if (cl->cl_qtype == Q_RED)
663 		return red_addq(cl->cl_red, &b->queue, m, cl->cl_pktattr);
666 	if (cl->cl_qtype == Q_CODEL)
667 		return codel_addq(cl->cl_codel, &b->queue, m);
	/* droptail: the limit is per bucket (see TODO in the file header) */
669 	if (qlen(&b->queue) >= qlimit(&b->queue)) {
674 	if (cl->cl_flags & FARF_CLEARDSCP)
675 		write_dsfield(m, cl->cl_pktattr, 0);
/*
 * Remove and return the next packet from the class (bucket chosen by
 * fairq_selectq), then fold the packet into the bandwidth estimators.
 *
 * Both the class and the selected bucket keep an exponentially-decayed
 * byte/time pair: each sample adds the elapsed machclk delta (clamped to
 * 8 * machclk_freq, i.e. ~8 seconds, so an idle period cannot swamp the
 * average) and the packet length, then decays both accumulators by 1/8
 * (the ">> 3" lines).  bw_bytes * machclk_freq / bw_delta elsewhere yields
 * bytes/sec from these pairs.
 */
683 fairq_getq(struct fairq_class *cl, uint64_t cur_time)
688 	b = fairq_selectq(cl, 0);
692 	else if (cl->cl_qtype == Q_RIO)
693 		m = rio_getq((rio_t *)cl->cl_red, &b->queue);
696 	else if (cl->cl_qtype == Q_RED)
697 		m = red_getq(cl->cl_red, &b->queue);
700 	else if (cl->cl_qtype == Q_CODEL)
701 		m = codel_getq(cl->cl_codel, &b->queue);
704 		m = _getq(&b->queue);
707 	 * Calculate the BW change
713 	 * Per-class bandwidth calculation
715 	delta = (cur_time - cl->cl_last_time);
716 	if (delta > machclk_freq * 8)
717 		delta = machclk_freq * 8;
718 	cl->cl_bw_delta += delta;
719 	cl->cl_bw_bytes += m->m_pkthdr.len;
720 	cl->cl_last_time = cur_time;
	/* decay the moving average by 1/8 per sample */
721 	cl->cl_bw_delta -= cl->cl_bw_delta >> 3;
722 	cl->cl_bw_bytes -= cl->cl_bw_bytes >> 3;
725 	 * Per-bucket bandwidth calculation
727 	delta = (cur_time - b->last_time);
728 	if (delta > machclk_freq * 8)
729 		delta = machclk_freq * 8;
730 	b->bw_delta += delta;
731 	b->bw_bytes += m->m_pkthdr.len;
732 	b->last_time = cur_time;
733 	b->bw_delta -= b->bw_delta >> 3;
734 	b->bw_bytes -= b->bw_bytes >> 3;
/*
 * Figure out what the next packet would be if there were no limits.  If
 * this class hits its bandwidth limit *hit_limit is set to non-zero, otherwise
 * it is set to 0.  A non-NULL mbuf is returned either way.
 *
 * Peeks (does not dequeue) at the head of the bucket chosen by
 * fairq_selectq(..., ispoll=1).  The class bandwidth estimate is updated
 * with the elapsed time (clamped to 8 * machclk_freq) but no bytes, and
 * compared in bytes/sec against cl_bandwidth.
 */
745 fairq_pollq(struct fairq_class *cl, uint64_t cur_time, int *hit_limit)
753 	b = fairq_selectq(cl, 1);
756 	m = qhead(&b->queue);
759 	 * Did this packet exceed the class bandwidth?  Calculate the
760 	 * bandwidth component of the packet.
762 	 * - Calculate bytes per second
764 	delta = cur_time - cl->cl_last_time;
765 	if (delta > machclk_freq * 8)
766 		delta = machclk_freq * 8;
767 	cl->cl_bw_delta += delta;
768 	cl->cl_last_time = cur_time;
769 	if (cl->cl_bw_delta) {
770 		bw = cl->cl_bw_bytes * machclk_freq / cl->cl_bw_delta;
772 		if (bw > cl->cl_bandwidth)
	/* debug print (enable condition is on an elided line) */
775 			printf("BW %6ju relative to %6u %d queue %p\n",
776 			    (uintmax_t)bw, cl->cl_bandwidth, *hit_limit, b);
/*
 * Locate the next queue we want to pull a packet out of.  This code
 * is also responsible for removing empty buckets from the circular list.
 *
 * For a real dequeue (ispoll == 0) following a poll, the previously polled
 * bucket is returned so REMOVE matches the packet POLL reported.  Empty
 * buckets are unlinked from the doubly-linked ring as they are encountered.
 * Round-robin advance: with no hog threshold configured the head always
 * advances; otherwise buckets measured below cl_hogs_m1 are allowed to
 * burst (head is not advanced past them -- see the comment below).
 */
788 fairq_selectq(struct fairq_class *cl, int ispoll)
793 	if (ispoll == 0 && cl->cl_polled) {
795 		cl->cl_polled = NULL;
799 	while ((b = cl->cl_head) != NULL) {
801 		 * Remove empty queues from consideration
803 		if (qempty(&b->queue)) {
805 			cl->cl_head = b->next;
	/* b was the only bucket on the ring */
806 			if (cl->cl_head == b) {
809 				b->next->prev = b->prev;
810 				b->prev->next = b->next;
816 		 * Advance the round robin.  Queues with bandwidths less
817 		 * than the hog bandwidth are allowed to burst.
819 		if (cl->cl_hogs_m1 == 0) {
820 			cl->cl_head = b->next;
821 		} else if (b->bw_delta) {
822 			bw = b->bw_bytes * machclk_freq / b->bw_delta;
823 			if (bw >= cl->cl_hogs_m1) {
824 				cl->cl_head = b->next;
/*
 * Drop every queued packet in the class: repeatedly select a bucket and
 * drain it with _getq(), accounting each freed packet in cl_dropcnt.
 * fairq_selectq() unlinks emptied buckets, so the outer loop terminates
 * when the active-bucket ring is empty.
 */
842 fairq_purgeq(struct fairq_class *cl)
847 	while ((b = fairq_selectq(cl, 0)) != NULL) {
848 		while ((m = _getq(&b->queue)) != NULL) {
849 			PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
852 		ASSERT(qlen(&b->queue) == 0);
/*
 * Fill a fairq_classstats from the class: handle, limit, xmit/drop
 * counters, queue type, the total queued length summed over the active
 * bucket ring, and the active AQM's own statistics.
 */
857 get_class_stats(struct fairq_classstats *sp, struct fairq_class *cl)
861 	sp->class_handle = cl->cl_handle;
862 	sp->qlimit = cl->cl_qlimit;
863 	sp->xmit_cnt = cl->cl_xmitcnt;
864 	sp->drop_cnt = cl->cl_dropcnt;
865 	sp->qtype = cl->cl_qtype;
	/* walk the circular bucket list once, summing per-bucket lengths */
871 			sp->qlength += qlen(&b->queue);
873 		} while (b != cl->cl_head);
877 	if (cl->cl_qtype == Q_RED)
878 		red_getstats(cl->cl_red, &sp->red[0]);
881 	if (cl->cl_qtype == Q_RIO)
882 		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
885 	if (cl->cl_qtype == Q_CODEL)
886 		codel_getstats(cl->cl_codel, &sp->codel);
/* convert a class handle to the corresponding class pointer */
/*
 * Linear scan from the highest configured priority down; returns the class
 * whose cl_handle matches, or (on an elided line) NULL when none does.
 */
891 static struct fairq_class *
892 clh_to_clp(struct fairq_if *pif, uint32_t chandle)
894 	struct fairq_class *cl;
900 	for (idx = pif->pif_maxpri; idx >= 0; idx--)
901 		if ((cl = pif->pif_classes[idx]) != NULL &&
902 		    cl->cl_handle == chandle)
908 #endif /* ALTQ_FAIRQ */