/*
 * Copyright (C) 1997-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $KAME: altq_subr.c,v 1.21 2003/11/06 06:32:53 kjc Exp $
 */
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/queue.h>

#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netpfil/pf/pf.h>
#include <netpfil/pf/pf_altq.h>
#include <net/altq/altq.h>

/* machine dependent clock related includes */
#include <sys/eventhandler.h>
#include <machine/clock.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/cpufunc.h>		/* for pentium tsc */
#include <machine/specialreg.h>		/* for CPUID_TSC */
#include <machine/md_var.h>		/* for cpu_feature */
#endif /* __amd64 || __i386__ */
/*
 * internal function prototypes
 */
static void	tbr_timeout(void *);
int (*altq_input)(struct mbuf *, int) = NULL;
static struct mbuf *tbr_dequeue(struct ifaltq *, int);
static int tbr_timer = 0;	/* token bucket regulator timer */
#if !defined(__FreeBSD__) || (__FreeBSD_version < 600000)
static struct callout tbr_callout = CALLOUT_INITIALIZER;
#else
static struct callout tbr_callout;
#endif
#ifdef ALTQ3_CLFIER_COMPAT
static int	extract_ports4(struct mbuf *, struct ip *, struct flowinfo_in *);
#ifdef INET6
static int	extract_ports6(struct mbuf *, struct ip6_hdr *,
			       struct flowinfo_in6 *);
#endif
static int	apply_filter4(u_int32_t, struct flow_filter *,
			      struct flowinfo_in *);
static int	apply_ppfilter4(u_int32_t, struct flow_filter *,
				struct flowinfo_in *);
#ifdef INET6
static int	apply_filter6(u_int32_t, struct flow_filter6 *,
			      struct flowinfo_in6 *);
#endif
static int	apply_tosfilter4(u_int32_t, struct flow_filter *,
				 struct flowinfo_in *);
static u_long	get_filt_handle(struct acc_classifier *, int);
static struct acc_filter *filth_to_filtp(struct acc_classifier *, u_long);
static u_int32_t filt2fibmask(struct flow_filter *);

static void	ip4f_cache(struct ip *, struct flowinfo_in *);
static int	ip4f_lookup(struct ip *, struct flowinfo_in *);
static int	ip4f_init(void);
static struct ip4_frag *ip4f_alloc(void);
static void	ip4f_free(struct ip4_frag *);
#endif /* ALTQ3_CLFIER_COMPAT */
SYSCTL_NODE(_kern_features, OID_AUTO, altq, CTLFLAG_RD | CTLFLAG_CAPRD, 0,
    "ALTQ packet queuing");

#define	ALTQ_FEATURE(name, desc)					\
	SYSCTL_INT_WITH_LABEL(_kern_features_altq, OID_AUTO, name,	\
	    CTLFLAG_RD | CTLFLAG_CAPRD, SYSCTL_NULL_INT_PTR, 1,		\
	    desc, "feature")
ALTQ_FEATURE(cbq, "ALTQ Class Based Queuing discipline");
ALTQ_FEATURE(codel, "ALTQ Controlled Delay discipline");
ALTQ_FEATURE(red, "ALTQ Random Early Detection discipline");
ALTQ_FEATURE(rio, "ALTQ Random Early Drop discipline");
ALTQ_FEATURE(hfsc, "ALTQ Hierarchical Packet Scheduler discipline");
ALTQ_FEATURE(priq, "ALTQ Priority Queuing discipline");
ALTQ_FEATURE(fairq, "ALTQ Fair Queuing discipline");
/*
 * alternate queueing support routines
 */

/* look up the queue state by the interface name and the queueing type. */
void *
altq_lookup(name, type)
	char *name;
	int type;
{
	struct ifnet *ifp;

	if ((ifp = ifunit(name)) != NULL) {
		/* read if_snd unlocked */
		if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
			return (ifp->if_snd.altq_disc);
	}

	return (NULL);
}
int
altq_attach(ifq, type, discipline, enqueue, dequeue, request)
	struct ifaltq *ifq;
	int type;
	void *discipline;
	int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
	struct mbuf *(*dequeue)(struct ifaltq *, int);
	int (*request)(struct ifaltq *, int, void *);
{
	IFQ_LOCK(ifq);
	if (!ALTQ_IS_READY(ifq)) {
		IFQ_UNLOCK(ifq);
		return (ENXIO);
	}

	ifq->altq_type     = type;
	ifq->altq_disc     = discipline;
	ifq->altq_enqueue  = enqueue;
	ifq->altq_dequeue  = dequeue;
	ifq->altq_request  = request;
	ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
	IFQ_UNLOCK(ifq);
	return (0);
}
int
altq_detach(ifq)
	struct ifaltq *ifq;
{
	IFQ_LOCK(ifq);

	if (!ALTQ_IS_READY(ifq)) {
		IFQ_UNLOCK(ifq);
		return (ENXIO);
	}
	if (ALTQ_IS_ENABLED(ifq)) {
		IFQ_UNLOCK(ifq);
		return (EBUSY);
	}
	if (!ALTQ_IS_ATTACHED(ifq)) {
		IFQ_UNLOCK(ifq);
		return (0);
	}

	ifq->altq_type     = ALTQT_NONE;
	ifq->altq_disc     = NULL;
	ifq->altq_enqueue  = NULL;
	ifq->altq_dequeue  = NULL;
	ifq->altq_request  = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;

	IFQ_UNLOCK(ifq);
	return (0);
}
int
altq_enable(ifq)
	struct ifaltq *ifq;
{
	IFQ_LOCK(ifq);

	if (!ALTQ_IS_READY(ifq)) {
		IFQ_UNLOCK(ifq);
		return (ENXIO);
	}
	if (ALTQ_IS_ENABLED(ifq)) {
		IFQ_UNLOCK(ifq);
		return (0);
	}

	IFQ_PURGE_NOLOCK(ifq);
	ASSERT(ifq->ifq_len == 0);
	ifq->ifq_drv_maxlen = 0;	/* disable bulk dequeue */
	ifq->altq_flags |= ALTQF_ENABLED;

	IFQ_UNLOCK(ifq);
	return (0);
}
int
altq_disable(ifq)
	struct ifaltq *ifq;
{
	IFQ_LOCK(ifq);
	if (!ALTQ_IS_ENABLED(ifq)) {
		IFQ_UNLOCK(ifq);
		return (0);
	}

	IFQ_PURGE_NOLOCK(ifq);
	ASSERT(ifq->ifq_len == 0);
	ifq->altq_flags &= ~(ALTQF_ENABLED);

	IFQ_UNLOCK(ifq);
	return (0);
}
void
altq_assert(file, line, failedexpr)
	const char *file, *failedexpr;
	int line;
{
	(void)printf("altq assertion \"%s\" failed: file \"%s\", line %d\n",
		     failedexpr, file, line);
	panic("altq assertion");
	/* NOTREACHED */
}
/*
 * internal representation of token bucket parameters
 *	rate:	(byte_per_unittime << TBR_SHIFT)  / machclk_freq
 *		(((bits_per_sec) / 8) << TBR_SHIFT) / machclk_freq
 *	depth:	byte << TBR_SHIFT
 */

#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
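/*
 * Worked example (illustrative figures): for an 8 Mbit/s profile,
 * byte_per_unittime is 1,000,000 bytes/sec.  With machclk_freq at
 * 1 GHz, tbr_rate = TBR_SCALE(1000000) / 10^9, i.e. 1/1000 of a
 * byte per machine clock tick in fixed point; a 10,000-byte depth
 * then gives a fill-up time of 10^7 machine clock ticks (10 ms).
 */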
static struct mbuf *
tbr_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;
	u_int64_t now;

	IFQ_LOCK_ASSERT(ifq);
	tbr = ifq->altq_tbr;
	if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
		/* if this is a remove after poll, bypass tbr check */
	} else {
		/* update token only when it is negative */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
			if (interval >= tbr->tbr_filluptime)
				tbr->tbr_token = tbr->tbr_depth;
			else {
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if token is still negative, don't allow dequeue */
		if (tbr->tbr_token <= 0)
			return (NULL);
	}

	if (ALTQ_IS_ENABLED(ifq))
		m = (*ifq->altq_dequeue)(ifq, op);
	else {
		if (op == ALTDQ_POLL)
			IF_POLL(ifq, m);
		else
			IF_DEQUEUE(ifq, m);
	}

	if (m != NULL && op == ALTDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;
	return (m);
}
/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is
 * deleted.
 */
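/*
 * An illustrative call (rate is in bits per second, depth in bytes,
 * per the conversion in tbr_set() below): install a 10 Mbit/s
 * regulator with a 10 KB bucket on an interface's send queue:
 *
 *	struct tb_profile p = { .rate = 10000000, .depth = 10000 };
 *	error = tbr_set(&ifp->if_snd, &p);
 *
 * and delete it again by calling tbr_set() with p.rate set to 0.
 */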
int
tbr_set(ifq, profile)
	struct ifaltq *ifq;
	struct tb_profile *profile;
{
	struct tb_regulator *tbr, *otbr;

	if (tbr_dequeue_ptr == NULL)
		tbr_dequeue_ptr = tbr_dequeue;

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0) {
		printf("tbr_set: no cpu clock available!\n");
		return (ENXIO);
	}

	IFQ_LOCK(ifq);
	if (profile->rate == 0) {
		/* delete this tbr */
		if ((tbr = ifq->altq_tbr) == NULL) {
			IFQ_UNLOCK(ifq);
			return (ENOENT);
		}
		ifq->altq_tbr = NULL;
		free(tbr, M_DEVBUF);
		IFQ_UNLOCK(ifq);
		return (0);
	}

	tbr = malloc(sizeof(struct tb_regulator), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (tbr == NULL) {
		IFQ_UNLOCK(ifq);
		return (ENOMEM);
	}
	tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
	tbr->tbr_depth = TBR_SCALE(profile->depth);
	if (tbr->tbr_rate > 0)
		tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
	else
		tbr->tbr_filluptime = LLONG_MAX;
	/*
	 * The longest time between tbr_dequeue() calls will be about 1
	 * system tick, as the callout that drives it is scheduled once per
	 * tick.  The refill-time detection logic in tbr_dequeue() can only
	 * properly detect the passage of up to LLONG_MAX machclk ticks.
	 * Therefore, in order for this logic to function properly in the
	 * extreme case, the maximum value of tbr_filluptime should be
	 * LLONG_MAX less one system tick's worth of machclk ticks less
	 * some additional slop factor (here one more system tick's worth
	 * of machclk ticks).
	 */
	if (tbr->tbr_filluptime > (LLONG_MAX - 2 * machclk_per_tick))
		tbr->tbr_filluptime = LLONG_MAX - 2 * machclk_per_tick;
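	/*
	 * Example: with machclk_freq = 10^9 and hz = 1000, machclk_per_tick
	 * is 10^6, so the cap above leaves two system ticks' worth of
	 * headroom below LLONG_MAX for the interval measured in
	 * tbr_dequeue() to overshoot one tick.
	 */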
	tbr->tbr_token = tbr->tbr_depth;
	tbr->tbr_last = read_machclk();
	tbr->tbr_lastop = ALTDQ_REMOVE;

	otbr = ifq->altq_tbr;
	ifq->altq_tbr = tbr;	/* set the new tbr */

	if (otbr != NULL)
		free(otbr, M_DEVBUF);
	else {
		if (tbr_timer == 0) {
			CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
			tbr_timer = 1;
		}
	}
	IFQ_UNLOCK(ifq);
	return (0);
}
/*
 * tbr_timeout goes through the interface list, and kicks the drivers
 * if necessary.
 */
static void
tbr_timeout(arg)
	void *arg;
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ifnet *ifp;
	struct epoch_tracker et;
	int active;

	active = 0;
	NET_EPOCH_ENTER(et);
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (ifp = CK_STAILQ_FIRST(&V_ifnet); ifp;
		    ifp = CK_STAILQ_NEXT(ifp, if_link)) {
			/* read from if_snd unlocked */
			if (!TBR_IS_ENABLED(&ifp->if_snd))
				continue;
			active++;
			if (!IFQ_IS_EMPTY(&ifp->if_snd) &&
			    ifp->if_start != NULL)
				(*ifp->if_start)(ifp);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
	NET_EPOCH_EXIT(et);
	if (active > 0)
		CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
	else
		tbr_timer = 0;	/* don't need tbr_timer anymore */
}
/*
 * attach a discipline to the interface.  if one already exists, it is
 * overridden.
 * Locking is done in the discipline specific attach functions. Basically
 * they call back to altq_attach which takes care of the attach and locking.
 */
int
altq_pfattach(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
	case ALTQT_CBQ:
		error = cbq_pfattach(a);
		break;
	case ALTQT_PRIQ:
		error = priq_pfattach(a);
		break;
	case ALTQT_HFSC:
		error = hfsc_pfattach(a);
		break;
	case ALTQT_FAIRQ:
		error = fairq_pfattach(a);
		break;
	case ALTQT_CODEL:
		error = codel_pfattach(a);
		break;
	}

	return (error);
}
/*
 * detach a discipline from the interface.
 * it is possible that the discipline was already overridden by another
 * discipline.
 */
int
altq_pfdetach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int error = 0;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);

	/* if this discipline is no longer referenced, just return */
	/* read unlocked from if_snd */
	if (a->altq_disc == NULL || a->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	/* read unlocked from if_snd, _disable and _detach take care */
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		error = altq_disable(&ifp->if_snd);
	if (error == 0)
		error = altq_detach(&ifp->if_snd);

	return (error);
}
/*
 * add a discipline or a queue
 * Locking is done in the discipline specific functions with regards to
 * malloc with WAITOK, also it is not yet clear which lock to use.
 */
int
altq_add(struct ifnet *ifp, struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_add_queue(a));

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0)
		panic("altq_add: no cpu clock");
	switch (a->scheduler) {
	case ALTQT_CBQ:
		error = cbq_add_altq(ifp, a);
		break;
	case ALTQT_PRIQ:
		error = priq_add_altq(ifp, a);
		break;
	case ALTQT_HFSC:
		error = hfsc_add_altq(ifp, a);
		break;
	case ALTQT_FAIRQ:
		error = fairq_add_altq(ifp, a);
		break;
	case ALTQT_CODEL:
		error = codel_add_altq(ifp, a);
		break;
	}

	return (error);
}
/*
 * remove a discipline or a queue
 * It is yet unclear what lock to use to protect this operation, the
 * discipline specific functions will determine and grab it
 */
int
altq_remove(struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_remove_queue(a));

	switch (a->scheduler) {
	case ALTQT_CBQ:
		error = cbq_remove_altq(a);
		break;
	case ALTQT_PRIQ:
		error = priq_remove_altq(a);
		break;
	case ALTQT_HFSC:
		error = hfsc_remove_altq(a);
		break;
	case ALTQT_FAIRQ:
		error = fairq_remove_altq(a);
		break;
	case ALTQT_CODEL:
		error = codel_remove_altq(a);
		break;
	}

	return (error);
}
/*
 * add a queue to the discipline
 * It is yet unclear what lock to use to protect this operation, the
 * discipline specific functions will determine and grab it
 */
int
altq_add_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
	case ALTQT_CBQ:
		error = cbq_add_queue(a);
		break;
	case ALTQT_PRIQ:
		error = priq_add_queue(a);
		break;
	case ALTQT_HFSC:
		error = hfsc_add_queue(a);
		break;
	case ALTQT_FAIRQ:
		error = fairq_add_queue(a);
		break;
	}

	return (error);
}
/*
 * remove a queue from the discipline
 * It is yet unclear what lock to use to protect this operation, the
 * discipline specific functions will determine and grab it
 */
int
altq_remove_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
	case ALTQT_CBQ:
		error = cbq_remove_queue(a);
		break;
	case ALTQT_PRIQ:
		error = priq_remove_queue(a);
		break;
	case ALTQT_HFSC:
		error = hfsc_remove_queue(a);
		break;
	case ALTQT_FAIRQ:
		error = fairq_remove_queue(a);
		break;
	}

	return (error);
}
/*
 * get queue statistics
 * Locking is done in the discipline specific functions with regards to
 * copyout operations, also it is not yet clear which lock to use.
 */
int
altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes, int version)
{
	int error = 0;

	switch (a->scheduler) {
	case ALTQT_CBQ:
		error = cbq_getqstats(a, ubuf, nbytes, version);
		break;
	case ALTQT_PRIQ:
		error = priq_getqstats(a, ubuf, nbytes, version);
		break;
	case ALTQT_HFSC:
		error = hfsc_getqstats(a, ubuf, nbytes, version);
		break;
	case ALTQT_FAIRQ:
		error = fairq_getqstats(a, ubuf, nbytes, version);
		break;
	case ALTQT_CODEL:
		error = codel_getqstats(a, ubuf, nbytes, version);
		break;
	}

	return (error);
}
/*
 * read and write diffserv field in IPv4 or IPv6 header
 */
u_int8_t
read_dsfield(m, pktattr)
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	struct mbuf *m0;
	u_int8_t ds_field = 0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return ((u_int8_t)0);

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		printf("read_dsfield: can't locate header!\n");
#endif
		return ((u_int8_t)0);
	}
	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;

		if (ip->ip_v != 4)
			return ((u_int8_t)0);	/* version mismatch! */
		ds_field = ip->ip_tos;
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return ((u_int8_t)0);	/* version mismatch! */
		ds_field = (flowlabel >> 20) & 0xff;
	}
#endif

	return (ds_field);
}
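/*
 * Note on the IPv6 case above: the 32-bit ip6_flow word is laid out as
 * version (4 bits) | traffic class (8 bits) | flow label (20 bits), so
 * after ntohl() the version number sits at bits 28-31 and the DS byte
 * at bits 20-27, which is what the two shifts extract.
 */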
void
write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, u_int8_t dsfield)
{
	struct mbuf *m0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return;

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		printf("write_dsfield: can't locate header!\n");
#endif
		return;
	}
	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;
		u_int8_t old;
		int32_t sum;

		if (ip->ip_v != 4)
			return;		/* version mismatch! */
		old = ip->ip_tos;
		dsfield |= old & 3;	/* leave CU bits */
		if (old == dsfield)
			return;
		ip->ip_tos = dsfield;
		/*
		 * update checksum (from RFC1624)
		 *	HC' = ~(~HC + ~m + m')
		 */
		sum = ~ntohs(ip->ip_sum) & 0xffff;
		sum += 0xff00 + (~old & 0xff) + dsfield;
		sum = (sum >> 16) + (sum & 0xffff);
		sum += (sum >> 16);  /* add carry */

		ip->ip_sum = htons(~sum & 0xffff);
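		/*
		 * Derivation: the 16-bit word m containing the TOS byte is
		 * (vhl << 8) | old and becomes m' = (vhl << 8) | dsfield,
		 * so ~m + m' = 0xffff - m + m'
		 *            = 0xff00 + (~old & 0xff) + dsfield,
		 * which is the quantity added into the sum above before the
		 * end-around carries are folded back in.
		 */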
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return;		/* version mismatch! */
		flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
		ip6->ip6_flow = htonl(flowlabel);
	}
#endif
}
/*
 * high resolution clock support taking advantage of a machine dependent
 * high resolution time counter (e.g., timestamp counter of intel pentium).
 * we assume
 *  - 64-bit-long monotonically-increasing counter
 *  - frequency range is 100M-4GHz (CPU speed)
 */
/* if pcc is not available or disabled, emulate 256MHz using microtime() */
#define	MACHCLK_SHIFT	8

int machclk_usepcc;
u_int32_t machclk_freq;
u_int32_t machclk_per_tick;
#if defined(__i386__) && defined(__NetBSD__)
extern u_int64_t cpu_tsc_freq;
#endif
#if (__FreeBSD_version >= 700035)
/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	/* If there was an error during the transition, don't do anything. */
	if (status != 0)
		return;

#if (__FreeBSD_version >= 701102) && (defined(__amd64__) || defined(__i386__))
	/* If TSC is P-state invariant, don't do anything. */
	if (tsc_is_invariant)
		return;
#endif

	/* Total setting for this level gives the new frequency in MHz. */
	machclk_freq = level->total_set.freq * 1000000;
}

EVENTHANDLER_DEFINE(cpufreq_post_change, tsc_freq_changed, NULL,
    EVENTHANDLER_PRI_LAST);
#endif /* __FreeBSD_version >= 700035 */
static void
init_machclk_setup(void)
{
	callout_init(&tbr_callout, 1);
	machclk_usepcc = 1;

#if (!defined(__amd64__) && !defined(__i386__)) || defined(ALTQ_NOPCC)
	machclk_usepcc = 0;
#endif
#if defined(__FreeBSD__) && defined(SMP)
	machclk_usepcc = 0;
#endif
#if defined(__NetBSD__) && defined(MULTIPROCESSOR)
	machclk_usepcc = 0;
#endif
#if defined(__amd64__) || defined(__i386__)
	/* check if TSC is available */
	if ((cpu_feature & CPUID_TSC) == 0 ||
	    atomic_load_acq_64(&tsc_freq) == 0)
		machclk_usepcc = 0;
#endif
}
void
init_machclk(void)
{
	/* Call one-time initialization function. */
	init_machclk_setup();

	if (machclk_usepcc == 0) {
		/* emulate 256MHz using microtime() */
		machclk_freq = 1000000 << MACHCLK_SHIFT;
		machclk_per_tick = machclk_freq / hz;
#ifdef ALTQ_DEBUG
		printf("altq: emulate %uHz cpu clock\n", machclk_freq);
#endif
		return;
	}
	/*
	 * if the clock frequency (of Pentium TSC or Alpha PCC) is
	 * accessible, just use it.
	 */
#if defined(__amd64__) || defined(__i386__)
	machclk_freq = atomic_load_acq_64(&tsc_freq);
#endif
	/*
	 * if we don't know the clock frequency, measure it.
	 */
	if (machclk_freq == 0) {
		static int wait;
		struct timeval tv_start, tv_end;
		u_int64_t start, end, diff;
		int timo;

		microtime(&tv_start);
		start = read_machclk();
		timo = hz;	/* 1 sec */
		(void)tsleep(&wait, PWAIT | PCATCH, "init_machclk", timo);
		microtime(&tv_end);
		end = read_machclk();
		diff = (u_int64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
		    + tv_end.tv_usec - tv_start.tv_usec;
		if (diff != 0)
			machclk_freq = (u_int)((end - start) * 1000000 / diff);
	}
	machclk_per_tick = machclk_freq / hz;

#ifdef ALTQ_DEBUG
	printf("altq: CPU clock: %uHz\n", machclk_freq);
#endif
}
#if defined(__OpenBSD__) && defined(__i386__)
static __inline u_int64_t
rdtsc(void)
{
	u_int64_t rv;

	__asm __volatile(".byte 0x0f, 0x31" : "=A" (rv));
	return (rv);
}
#endif /* __OpenBSD__ && __i386__ */
u_int64_t
read_machclk(void)
{
	u_int64_t val;

	if (machclk_usepcc) {
#if defined(__amd64__) || defined(__i386__)
		val = rdtsc();
#else
		panic("read_machclk");
#endif
	} else {
		struct timeval tv, boottime;

		microtime(&tv);
		getboottime(&boottime);
		val = (((u_int64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
		    + tv.tv_usec) << MACHCLK_SHIFT);
	}
	return (val);
}
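/*
 * In the emulated branch above, each elapsed microsecond advances the
 * returned value by 2^MACHCLK_SHIFT (256), which is what makes the
 * advertised machclk_freq of 1000000 << MACHCLK_SHIFT (256 MHz) come
 * out right.
 */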
#ifdef ALTQ3_CLFIER_COMPAT

#ifndef IPPROTO_ESP
#define	IPPROTO_ESP	50		/* encapsulating security payload */
#endif
#ifndef IPPROTO_AH
#define	IPPROTO_AH	51		/* authentication header */
#endif
/*
 * extract flow information from a given packet.
 * filt_mask shows flowinfo fields required.
 * we assume the ip header is in one mbuf, and addresses and ports are
 * in network byte order.
 */
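/*
 * An illustrative call, for a classifier that needs the full IPv4
 * tuple (FIMB4_ALL selects all IPv4 flowinfo fields):
 *
 *	struct flowinfo_in fin;
 *
 *	if (altq_extractflow(m, AF_INET, (struct flowinfo *)&fin,
 *	    FIMB4_ALL))
 *		... fin.fi_src/fi_dst/fi_sport/fi_dport describe the flow.
 */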
int
altq_extractflow(m, af, flow, filt_bmask)
	struct mbuf *m;
	int af;
	struct flowinfo *flow;
	u_int32_t filt_bmask;
{

	switch (af) {
	case AF_INET: {
		struct flowinfo_in *fin;
		struct ip *ip;

		ip = mtod(m, struct ip *);

		if (ip->ip_v != 4)
			break;

		fin = (struct flowinfo_in *)flow;
		fin->fi_len = sizeof(struct flowinfo_in);
		fin->fi_family = AF_INET;

		fin->fi_proto = ip->ip_p;
		fin->fi_tos = ip->ip_tos;

		fin->fi_src.s_addr = ip->ip_src.s_addr;
		fin->fi_dst.s_addr = ip->ip_dst.s_addr;

		if (filt_bmask & FIMB4_PORTS)
			/* if port info is required, extract port numbers */
			extract_ports4(m, ip, fin);
		else {
			fin->fi_sport = 0;
			fin->fi_dport = 0;
			fin->fi_gpi = 0;
		}
		return (1);
	}
#ifdef INET6
	case AF_INET6: {
		struct flowinfo_in6 *fin6;
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		/* should we check the ip version? */

		fin6 = (struct flowinfo_in6 *)flow;
		fin6->fi6_len = sizeof(struct flowinfo_in6);
		fin6->fi6_family = AF_INET6;

		fin6->fi6_proto = ip6->ip6_nxt;
		fin6->fi6_tclass = IPV6_TRAFFIC_CLASS(ip6);

		fin6->fi6_flowlabel = ip6->ip6_flow & htonl(0x000fffff);
		fin6->fi6_src = ip6->ip6_src;
		fin6->fi6_dst = ip6->ip6_dst;

		if ((filt_bmask & FIMB6_PORTS) ||
		    ((filt_bmask & FIMB6_PROTO)
		     && ip6->ip6_nxt > IPPROTO_IPV6))
			/*
			 * if port info is required, or proto is required
			 * but there are option headers, extract port
			 * and protocol numbers.
			 */
			extract_ports6(m, ip6, fin6);
		else {
			fin6->fi6_sport = 0;
			fin6->fi6_dport = 0;
			fin6->fi6_gpi = 0;
		}
		return (1);
	}
#endif /* INET6 */
	default:
		break;
	}

	flow->fi_len = sizeof(struct flowinfo);
	flow->fi_family = AF_UNSPEC;
	return (0);
}
/*
 * helper routine to extract port numbers
 */
/* structure for ipsec and ipv6 option header template */
struct _opt6 {
	u_int8_t	opt6_nxt;	/* next header */
	u_int8_t	opt6_hlen;	/* header extension length */
	u_int32_t	ah_spi;		/* security parameter index
					   for authentication header */
};
/*
 * extract port numbers from an IPv4 packet.
 */
static int
extract_ports4(m, ip, fin)
	struct mbuf *m;
	struct ip *ip;
	struct flowinfo_in *fin;
{
	struct mbuf *m0;
	u_short ip_off;
	u_int8_t proto;
	int off;

	fin->fi_sport = 0;
	fin->fi_dport = 0;
	fin->fi_gpi = 0;

	ip_off = ntohs(ip->ip_off);
	/* if it is a fragment, try cached fragment info */
	if (ip_off & IP_OFFMASK) {
		ip4f_lookup(ip, fin);
		return (1);
	}

	/* locate the mbuf containing the protocol header */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if (((caddr_t)ip >= m0->m_data) &&
		    ((caddr_t)ip < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
#ifdef ALTQ_DEBUG
		printf("extract_ports4: can't locate header! ip=%p\n", ip);
#endif
		return (0);
	}
	off = ((caddr_t)ip - m0->m_data) + (ip->ip_hl << 2);
	proto = ip->ip_p;

#ifdef ALTQ_IPSEC
 again:
#endif
	while (off >= m0->m_len) {
		off -= m0->m_len;
		m0 = m0->m_next;
		if (m0 == NULL)
			return (0);	/* bogus ip_hl! */
	}
	if (m0->m_len < off + 4)
		return (0);

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP: {
		struct udphdr *udp;

		udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
		fin->fi_sport = udp->uh_sport;
		fin->fi_dport = udp->uh_dport;
		fin->fi_proto = proto;
		}
		break;

#ifdef ALTQ_IPSEC
	case IPPROTO_ESP:
		if (fin->fi_gpi == 0){
			u_int32_t *gpi;

			gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
			fin->fi_gpi = *gpi;
		}
		fin->fi_proto = proto;
		break;

	case IPPROTO_AH: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			proto = opt6->opt6_nxt;
			off += 8 + (opt6->opt6_hlen * 4);
			if (fin->fi_gpi == 0 && m0->m_len >= off + 8)
				fin->fi_gpi = opt6->ah_spi;
		}
		/* goto the next header */
		goto again;
#endif /* ALTQ_IPSEC */

	default:
		fin->fi_proto = proto;
		return (0);
	}

	/* if this is a first fragment, cache it. */
	if (ip_off & IP_MF)
		ip4f_cache(ip, fin);

	return (1);
}
#ifdef INET6
static int
extract_ports6(m, ip6, fin6)
	struct mbuf *m;
	struct ip6_hdr *ip6;
	struct flowinfo_in6 *fin6;
{
	struct mbuf *m0;
	int off;
	u_int8_t proto;

	fin6->fi6_gpi = 0;
	fin6->fi6_sport = 0;
	fin6->fi6_dport = 0;

	/* locate the mbuf containing the protocol header */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if (((caddr_t)ip6 >= m0->m_data) &&
		    ((caddr_t)ip6 < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
#ifdef ALTQ_DEBUG
		printf("extract_ports6: can't locate header! ip6=%p\n", ip6);
#endif
		return (0);
	}
	off = ((caddr_t)ip6 - m0->m_data) + sizeof(struct ip6_hdr);

	proto = ip6->ip6_nxt;
	do {
		while (off >= m0->m_len) {
			off -= m0->m_len;
			m0 = m0->m_next;
			if (m0 == NULL)
				return (0);
		}
		if (m0->m_len < off + 4)
			return (0);

		switch (proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP: {
			struct udphdr *udp;

			udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
			fin6->fi6_sport = udp->uh_sport;
			fin6->fi6_dport = udp->uh_dport;
			fin6->fi6_proto = proto;
			}
			return (1);

		case IPPROTO_ESP:
			if (fin6->fi6_gpi == 0) {
				u_int32_t *gpi;

				gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
				fin6->fi6_gpi = *gpi;
			}
			fin6->fi6_proto = proto;
			return (1);

		case IPPROTO_AH: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			if (fin6->fi6_gpi == 0 && m0->m_len >= off + 8)
				fin6->fi6_gpi = opt6->ah_spi;
			proto = opt6->opt6_nxt;
			off += 8 + (opt6->opt6_hlen * 4);
			/* goto the next header */
			break;
			}

		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			proto = opt6->opt6_nxt;
			off += (opt6->opt6_hlen + 1) * 8;
			/* goto the next header */
			break;
			}

		case IPPROTO_FRAGMENT:
			/* ipv6 fragmentations are not supported yet */
		default:
			fin6->fi6_proto = proto;
			return (0);
		}
	} while (1);
	/*NOTREACHED*/
}
#endif /* INET6 */
/*
 * altq common classifier
 */
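/*
 * A sketch of typical use by a discipline (variable names are
 * illustrative): build a struct flow_filter describing the match and
 * register it,
 *
 *	error = acc_add_filter(&classifier, &filter, class, &handle);
 *
 * then map each packet back to its class with
 *
 *	class = acc_classify(&classifier, m, AF_INET);
 */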
int
acc_add_filter(classifier, filter, class, phandle)
	struct acc_classifier *classifier;
	struct flow_filter *filter;
	void	*class;
	u_long	*phandle;
{
	struct acc_filter *afp, *prev, *tmp;
	int	i;

#ifdef INET6
	if (filter->ff_flow.fi_family != AF_INET &&
	    filter->ff_flow.fi_family != AF_INET6)
		return (EINVAL);
#else
	if (filter->ff_flow.fi_family != AF_INET)
		return (EINVAL);
#endif

	afp = malloc(sizeof(struct acc_filter),
	       M_DEVBUF, M_WAITOK);
	if (afp == NULL)
		return (ENOMEM);
	bzero(afp, sizeof(struct acc_filter));

	afp->f_filter = *filter;
	afp->f_class = class;
	i = ACC_WILDCARD_INDEX;
	if (filter->ff_flow.fi_family == AF_INET) {
		struct flow_filter *filter4 = &afp->f_filter;

		/*
		 * if address is 0, it's a wildcard.  if address mask
		 * isn't set, use full mask.
		 */
		if (filter4->ff_flow.fi_dst.s_addr == 0)
			filter4->ff_mask.mask_dst.s_addr = 0;
		else if (filter4->ff_mask.mask_dst.s_addr == 0)
			filter4->ff_mask.mask_dst.s_addr = 0xffffffff;
		if (filter4->ff_flow.fi_src.s_addr == 0)
			filter4->ff_mask.mask_src.s_addr = 0;
		else if (filter4->ff_mask.mask_src.s_addr == 0)
			filter4->ff_mask.mask_src.s_addr = 0xffffffff;

		/* clear extra bits in addresses */
		filter4->ff_flow.fi_dst.s_addr &=
		    filter4->ff_mask.mask_dst.s_addr;
		filter4->ff_flow.fi_src.s_addr &=
		    filter4->ff_mask.mask_src.s_addr;

		/*
		 * if dst address is a wildcard, use hash-entry
		 * ACC_WILDCARD_INDEX.
		 */
		if (filter4->ff_mask.mask_dst.s_addr != 0xffffffff)
			i = ACC_WILDCARD_INDEX;
		else
			i = ACC_GET_HASH_INDEX(filter4->ff_flow.fi_dst.s_addr);
	}
#ifdef INET6
	else if (filter->ff_flow.fi_family == AF_INET6) {
		struct flow_filter6 *filter6 =
			(struct flow_filter6 *)&afp->f_filter;
#ifndef IN6MASK0 /* taken from kame ipv6 */
#define	IN6MASK0	{{{ 0, 0, 0, 0 }}}
#define	IN6MASK128	{{{ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }}}
		const struct in6_addr in6mask0 = IN6MASK0;
		const struct in6_addr in6mask128 = IN6MASK128;
#endif

		if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_dst))
			filter6->ff_mask6.mask6_dst = in6mask0;
		else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_dst))
			filter6->ff_mask6.mask6_dst = in6mask128;
		if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_src))
			filter6->ff_mask6.mask6_src = in6mask0;
		else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_src))
			filter6->ff_mask6.mask6_src = in6mask128;

		/* clear extra bits in addresses */
		for (i = 0; i < 16; i++)
			filter6->ff_flow6.fi6_dst.s6_addr[i] &=
			    filter6->ff_mask6.mask6_dst.s6_addr[i];
		for (i = 0; i < 16; i++)
			filter6->ff_flow6.fi6_src.s6_addr[i] &=
			    filter6->ff_mask6.mask6_src.s6_addr[i];

		if (filter6->ff_flow6.fi6_flowlabel == 0)
			i = ACC_WILDCARD_INDEX;
		else
			i = ACC_GET_HASH_INDEX(filter6->ff_flow6.fi6_flowlabel);
	}
#endif /* INET6 */
	afp->f_handle = get_filt_handle(classifier, i);

	/* update filter bitmask */
	afp->f_fbmask = filt2fibmask(filter);
	classifier->acc_fbmask |= afp->f_fbmask;

	/*
	 * add this filter to the filter list.
	 * filters are ordered from the highest rule number.
	 */
	prev = NULL;
	LIST_FOREACH(tmp, &classifier->acc_filters[i], f_chain) {
		if (tmp->f_filter.ff_ruleno > afp->f_filter.ff_ruleno)
			prev = tmp;
		else
			break;
	}
	if (prev == NULL)
		LIST_INSERT_HEAD(&classifier->acc_filters[i], afp, f_chain);
	else
		LIST_INSERT_AFTER(prev, afp, f_chain);

	*phandle = afp->f_handle;
	return (0);
}
int
acc_delete_filter(classifier, handle)
	struct acc_classifier *classifier;
	u_long handle;
{
	struct acc_filter *afp;

	if ((afp = filth_to_filtp(classifier, handle)) == NULL)
		return (EINVAL);

	LIST_REMOVE(afp, f_chain);

	free(afp, M_DEVBUF);

	/* todo: update filt_bmask */

	return (0);
}
/*
 * delete filters referencing the specified class.
 * if the all flag is not 0, delete all the filters.
 */
int
acc_discard_filters(classifier, class, all)
	struct acc_classifier *classifier;
	void	*class;
	int	all;
{
	struct acc_filter *afp;
	int	i;

	for (i = 0; i < ACC_FILTER_TABLESIZE; i++) {
		do {
			LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
				if (all || afp->f_class == class) {
					LIST_REMOVE(afp, f_chain);
					free(afp, M_DEVBUF);
					/* start again from the head */
					break;
				}
		} while (afp != NULL);
	}

	if (all)
		classifier->acc_fbmask = 0;

	return (0);
}
void *
acc_classify(clfier, m, af)
	void *clfier;
	struct mbuf *m;
	int af;
{
	struct acc_classifier *classifier;
	struct flowinfo flow;
	struct acc_filter *afp;
	int	i;

	classifier = (struct acc_classifier *)clfier;
	altq_extractflow(m, af, &flow, classifier->acc_fbmask);

	if (flow.fi_family == AF_INET) {
		struct flowinfo_in *fp = (struct flowinfo_in *)&flow;
		if ((classifier->acc_fbmask & FIMB4_ALL) == FIMB4_TOS) {
			/* only tos is used */
			LIST_FOREACH(afp,
				 &classifier->acc_filters[ACC_WILDCARD_INDEX],
				 f_chain)
				if (apply_tosfilter4(afp->f_fbmask,
						     &afp->f_filter, fp))
					/* filter matched */
					return (afp->f_class);
		} else if ((classifier->acc_fbmask &
			(~(FIMB4_PROTO|FIMB4_SPORT|FIMB4_DPORT) & FIMB4_ALL))
		    == 0) {
			/* only proto and ports are used */
			LIST_FOREACH(afp,
				 &classifier->acc_filters[ACC_WILDCARD_INDEX],
				 f_chain)
				if (apply_ppfilter4(afp->f_fbmask,
						    &afp->f_filter, fp))
					/* filter matched */
					return (afp->f_class);
		} else {
			/* get the filter hash entry from its dest address */
			i = ACC_GET_HASH_INDEX(fp->fi_dst.s_addr);
			do {
				/*
				 * go through this loop twice.  first for dst
				 * hash, second for wildcards.
				 */
				LIST_FOREACH(afp,
					&classifier->acc_filters[i], f_chain)
					if (apply_filter4(afp->f_fbmask,
							  &afp->f_filter, fp))
						/* filter matched */
						return (afp->f_class);

				/*
				 * check again for filters with a dst addr
				 * wildcard.
				 * (daddr == 0 || dmask != 0xffffffff).
				 */
				if (i != ACC_WILDCARD_INDEX)
					i = ACC_WILDCARD_INDEX;
				else
					break;
			} while (1);
		}
	}
#ifdef INET6
	else if (flow.fi_family == AF_INET6) {
		struct flowinfo_in6 *fp6 = (struct flowinfo_in6 *)&flow;

		/* get the filter hash entry from its flow ID */
		if (fp6->fi6_flowlabel != 0)
			i = ACC_GET_HASH_INDEX(fp6->fi6_flowlabel);
		else
			/* flowlabel can be zero */
			i = ACC_WILDCARD_INDEX;

		/* go through this loop twice.  first for flow hash, second
		   for wildcards. */
		do {
			LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
				if (apply_filter6(afp->f_fbmask,
					(struct flow_filter6 *)&afp->f_filter,
					fp6))
					/* filter matched */
					return (afp->f_class);

			/*
			 * check again for filters with a wildcard.
			 */
			if (i != ACC_WILDCARD_INDEX)
				i = ACC_WILDCARD_INDEX;
			else
				break;
		} while (1);
	}
#endif /* INET6 */

	/* no filter matched */
	return (NULL);
}
static int
apply_filter4(fbmask, filt, pkt)
	u_int32_t	fbmask;
	struct flow_filter *filt;
	struct flowinfo_in *pkt;
{
	if (filt->ff_flow.fi_family != AF_INET)
		return (0);
	if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport)
		return (0);
	if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport)
		return (0);
	if ((fbmask & FIMB4_DADDR) &&
	    filt->ff_flow.fi_dst.s_addr !=
	    (pkt->fi_dst.s_addr & filt->ff_mask.mask_dst.s_addr))
		return (0);
	if ((fbmask & FIMB4_SADDR) &&
	    filt->ff_flow.fi_src.s_addr !=
	    (pkt->fi_src.s_addr & filt->ff_mask.mask_src.s_addr))
		return (0);
	if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto)
		return (0);
	if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos !=
	    (pkt->fi_tos & filt->ff_mask.mask_tos))
		return (0);
	if ((fbmask & FIMB4_GPI) && filt->ff_flow.fi_gpi != (pkt->fi_gpi))
		return (0);
	/* match */
	return (1);
}
/*
 * filter matching function optimized for a common case that checks
 * only protocol and port numbers
 */
static int
apply_ppfilter4(fbmask, filt, pkt)
	u_int32_t	fbmask;
	struct flow_filter *filt;
	struct flowinfo_in *pkt;
{
	if (filt->ff_flow.fi_family != AF_INET)
		return (0);
	if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport)
		return (0);
	if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport)
		return (0);
	if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto)
		return (0);
	/* match */
	return (1);
}
/*
 * filter matching function only for tos field.
 */
static int
apply_tosfilter4(fbmask, filt, pkt)
	u_int32_t	fbmask;
	struct flow_filter *filt;
	struct flowinfo_in *pkt;
{
	if (filt->ff_flow.fi_family != AF_INET)
		return (0);
	if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos !=
	    (pkt->fi_tos & filt->ff_mask.mask_tos))
		return (0);
	/* match */
	return (1);
}
#ifdef INET6
static int
apply_filter6(fbmask, filt, pkt)
	u_int32_t	fbmask;
	struct flow_filter6 *filt;
	struct flowinfo_in6 *pkt;
{
	int i;

	if (filt->ff_flow6.fi6_family != AF_INET6)
		return (0);
	if ((fbmask & FIMB6_FLABEL) &&
	    filt->ff_flow6.fi6_flowlabel != pkt->fi6_flowlabel)
		return (0);
	if ((fbmask & FIMB6_PROTO) &&
	    filt->ff_flow6.fi6_proto != pkt->fi6_proto)
		return (0);
	if ((fbmask & FIMB6_SPORT) &&
	    filt->ff_flow6.fi6_sport != pkt->fi6_sport)
		return (0);
	if ((fbmask & FIMB6_DPORT) &&
	    filt->ff_flow6.fi6_dport != pkt->fi6_dport)
		return (0);
	if (fbmask & FIMB6_SADDR) {
		for (i = 0; i < 4; i++)
			if (filt->ff_flow6.fi6_src.s6_addr32[i] !=
			    (pkt->fi6_src.s6_addr32[i] &
			     filt->ff_mask6.mask6_src.s6_addr32[i]))
				return (0);
	}
	if (fbmask & FIMB6_DADDR) {
		for (i = 0; i < 4; i++)
			if (filt->ff_flow6.fi6_dst.s6_addr32[i] !=
			    (pkt->fi6_dst.s6_addr32[i] &
			     filt->ff_mask6.mask6_dst.s6_addr32[i]))
				return (0);
	}
	if ((fbmask & FIMB6_TCLASS) &&
	    filt->ff_flow6.fi6_tclass !=
	    (pkt->fi6_tclass & filt->ff_mask6.mask6_tclass))
		return (0);
	if ((fbmask & FIMB6_GPI) &&
	    filt->ff_flow6.fi6_gpi != pkt->fi6_gpi)
		return (0);
	/* match */
	return (1);
}
#endif /* INET6 */
/*
 * bit 20-28: index to the filter hash table
 * bit 0-19:  unique id in the hash bucket.
 */
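/*
 * For example, a filter in hash bucket 5 with unique id 42 gets the
 * handle (5 << 20) | 42 == 0x50002a; ACC_GET_HINDEX() recovers the
 * bucket index so filth_to_filtp() only has to scan a single chain.
 */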
static u_long
get_filt_handle(classifier, i)
	struct acc_classifier *classifier;
	int	i;
{
	static u_long handle_number = 1;
	u_long 	handle;
	struct acc_filter *afp;

	while (1) {
		handle = handle_number++ & 0x000fffff;

		if (LIST_EMPTY(&classifier->acc_filters[i]))
			break;

		LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
			if ((afp->f_handle & 0x000fffff) == handle)
				break;
		if (afp == NULL)
			break;
		/* this handle is already used, try again */
	}

	return ((i << 20) | handle);
}
/* convert filter handle to filter pointer */
static struct acc_filter *
filth_to_filtp(classifier, handle)
	struct acc_classifier *classifier;
	u_long handle;
{
	struct acc_filter *afp;
	int	i;

	i = ACC_GET_HINDEX(handle);

	LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
		if (afp->f_handle == handle)
			return (afp);

	return (NULL);
}
/* create flowinfo bitmask */
static u_int32_t
filt2fibmask(filt)
	struct flow_filter *filt;
{
	u_int32_t mask = 0;
#ifdef INET6
	struct flow_filter6 *filt6;
#endif
	switch (filt->ff_flow.fi_family) {
	case AF_INET:
		if (filt->ff_flow.fi_proto != 0)
			mask |= FIMB4_PROTO;
		if (filt->ff_flow.fi_tos != 0)
			mask |= FIMB4_TOS;
		if (filt->ff_flow.fi_dst.s_addr != 0)
			mask |= FIMB4_DADDR;
		if (filt->ff_flow.fi_src.s_addr != 0)
			mask |= FIMB4_SADDR;
		if (filt->ff_flow.fi_sport != 0)
			mask |= FIMB4_SPORT;
		if (filt->ff_flow.fi_dport != 0)
			mask |= FIMB4_DPORT;
		if (filt->ff_flow.fi_gpi != 0)
			mask |= FIMB4_GPI;
		break;
#ifdef INET6
	case AF_INET6:
		filt6 = (struct flow_filter6 *)filt;

		if (filt6->ff_flow6.fi6_proto != 0)
			mask |= FIMB6_PROTO;
		if (filt6->ff_flow6.fi6_tclass != 0)
			mask |= FIMB6_TCLASS;
		if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_dst))
			mask |= FIMB6_DADDR;
		if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_src))
			mask |= FIMB6_SADDR;
		if (filt6->ff_flow6.fi6_sport != 0)
			mask |= FIMB6_SPORT;
		if (filt6->ff_flow6.fi6_dport != 0)
			mask |= FIMB6_DPORT;
		if (filt6->ff_flow6.fi6_gpi != 0)
			mask |= FIMB6_GPI;
		if (filt6->ff_flow6.fi6_flowlabel != 0)
			mask |= FIMB6_FLABEL;
		break;
#endif /* INET6 */
	}

	return (mask);
}
/*
 * helper functions to handle IPv4 fragments.
 * currently only in-sequence fragments are handled.
 *	- fragment info is cached in an LRU list.
 *	- when a first fragment is found, cache its flow info.
 *	- when a non-first fragment is found, lookup the cache.
 */
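/*
 * For example, a fragmented UDP datagram carries its UDP header only in
 * the first fragment; ip4f_cache() records the port numbers keyed on
 * (src, dst, proto, ip_id), and ip4f_lookup() supplies them for the
 * later fragments until the fragment with IP_MF clear releases the
 * entry.
 */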
struct ip4_frag {
    TAILQ_ENTRY(ip4_frag) ip4f_chain;
    char    ip4f_valid;
    u_short ip4f_id;
    struct flowinfo_in ip4f_info;
};

static TAILQ_HEAD(ip4f_list, ip4_frag) ip4f_list; /* IPv4 fragment cache */

#define	IP4F_TABSIZE		16	/* IPv4 fragment cache size */
static void
ip4f_cache(ip, fin)
	struct ip *ip;
	struct flowinfo_in *fin;
{
	struct ip4_frag *fp;

	if (TAILQ_EMPTY(&ip4f_list)) {
		/* first time call, allocate fragment cache entries. */
		if (ip4f_init() < 0)
			/* allocation failed! */
			return;
	}

	fp = ip4f_alloc();
	fp->ip4f_id = ip->ip_id;
	fp->ip4f_info.fi_proto = ip->ip_p;
	fp->ip4f_info.fi_src.s_addr = ip->ip_src.s_addr;
	fp->ip4f_info.fi_dst.s_addr = ip->ip_dst.s_addr;

	/* save port numbers */
	fp->ip4f_info.fi_sport = fin->fi_sport;
	fp->ip4f_info.fi_dport = fin->fi_dport;
	fp->ip4f_info.fi_gpi = fin->fi_gpi;
}
static int
ip4f_lookup(ip, fin)
	struct ip *ip;
	struct flowinfo_in *fin;
{
	struct ip4_frag *fp;

	for (fp = TAILQ_FIRST(&ip4f_list); fp != NULL && fp->ip4f_valid;
	     fp = TAILQ_NEXT(fp, ip4f_chain))
		if (ip->ip_id == fp->ip4f_id &&
		    ip->ip_src.s_addr == fp->ip4f_info.fi_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ip4f_info.fi_dst.s_addr &&
		    ip->ip_p == fp->ip4f_info.fi_proto) {
			/* found the matching entry */
			fin->fi_sport = fp->ip4f_info.fi_sport;
			fin->fi_dport = fp->ip4f_info.fi_dport;
			fin->fi_gpi = fp->ip4f_info.fi_gpi;

			if ((ntohs(ip->ip_off) & IP_MF) == 0)
				/* this is the last fragment,
				   release the entry. */
				ip4f_free(fp);

			return (1);
		}

	/* no matching entry found */
	return (0);
}
static int
ip4f_init(void)
{
	struct ip4_frag *fp;
	int i;

	TAILQ_INIT(&ip4f_list);
	for (i = 0; i < IP4F_TABSIZE; i++) {
		fp = malloc(sizeof(struct ip4_frag),
		       M_DEVBUF, M_NOWAIT);
		if (fp == NULL) {
			printf("ip4f_init: can't alloc %dth entry!\n", i);
			if (i == 0)
				return (-1);
			return (0);
		}
		fp->ip4f_valid = 0;
		TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain);
	}
	return (0);
}
static struct ip4_frag *
ip4f_alloc(void)
{
	struct ip4_frag *fp;

	/* reclaim an entry at the tail, put it at the head */
	fp = TAILQ_LAST(&ip4f_list, ip4f_list);
	TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain);
	fp->ip4f_valid = 1;
	TAILQ_INSERT_HEAD(&ip4f_list, fp, ip4f_chain);
	return (fp);
}
static void
ip4f_free(fp)
	struct ip4_frag *fp;
{
	TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain);
	fp->ip4f_valid = 0;
	TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain);
}

#endif /* ALTQ3_CLFIER_COMPAT */