/*
 * Codel/FQ_Codel and PIE/FQ-PIE Code:
 * Copyright (C) 2016 Centre for Advanced Internet Architectures,
 *  Swinburne University of Technology, Melbourne, Australia.
 * Portions of this code were made possible in part by a gift from
 *  The Comcast Innovation Fund.
 * Implemented by Rasool Al-Saadi <ralsaadi@swin.edu.au>
 *
 * Copyright (c) 1998-2002,2010 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Configuration and internal object management for dummynet.
 */

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <netinet/in.h>
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#include <netpfil/ipfw/dn_aqm.h>
#include <netpfil/ipfw/dn_sched.h>

/* which objects to copy */
#define DN_C_LINK	0x01
#define DN_C_SCH	0x02
#define DN_C_FLOW	0x04
#define DN_C_FS		0x08
#define DN_C_QUEUE	0x10
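
/*
 * The DN_C_* values are OR-ed together into copy_args->flags to select
 * which object classes a dump emits; see compute_space() and
 * copy_data_helper() below (e.g. DN_C_LINK | DN_C_SCH | DN_C_FLOW for a
 * 'pipe show').
 */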
/* argument used by schk_new() when creating a new scheduler */
struct schk_new_arg {
    struct dn_alg *fp;
    struct dn_sch *sch;
};

/*---- callout hooks. ----*/
static struct callout dn_timeout;
static struct task dn_task;
static struct taskqueue *dn_tq = NULL;

static void
dummynet(void *arg)
{
    (void)arg;	/* UNUSED */
    taskqueue_enqueue_fast(dn_tq, &dn_task);
}

void
dn_reschedule(void)
{
    callout_reset_sbt(&dn_timeout, tick_sbt, 0, dummynet, NULL,
        C_HARDCLOCK | C_DIRECT_EXEC);
}
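
/*
 * Note that the callout handler itself does no work: it only enqueues
 * dn_task, so the per-tick processing runs in taskqueue context, and
 * callout_reset_sbt() above re-arms the timer one tick (tick_sbt) ahead
 * with C_HARDCLOCK | C_DIRECT_EXEC.
 */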
/*----- end of callout hooks -----*/

/* Return AQM descriptor for given type or name. */
static struct dn_aqm *
find_aqm_type(int type, char *name)
{
    struct dn_aqm *d;

    SLIST_FOREACH(d, &dn_cfg.aqmlist, next) {
        if (d->type == type || (name && !strcasecmp(d->name, name)))
            return d;
    }
    return NULL; /* not found */
}

/* Return a scheduler descriptor given the type or name. */
static struct dn_alg *
find_sched_type(int type, char *name)
{
    struct dn_alg *d;

    SLIST_FOREACH(d, &dn_cfg.schedlist, next) {
        if (d->type == type || (name && !strcasecmp(d->name, name)))
            return d;
    }
    return NULL; /* not found */
}

/* Bound *v into [lo, hi]; out-of-range values are reset to dflt. */
int
ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg)
{
    int oldv = *v;
    const char *op = NULL;

    if (oldv < lo) {
        *v = dflt;
        op = "Bump";
    } else if (oldv > hi) {
        *v = dflt;
        op = "Clamp";
    } else
        return *v;
    if (op && msg)
        printf("%s %s to %d (was %d)\n", op, msg, *v, oldv);
    return *v;
}
/*---- flow_id mask, hash and compare functions ---*/
/*
 * The flow_id includes the 5-tuple, the queue/pipe number
 * which we store in the extra area in host order,
 * and for ipv6 also the flow_id6.
 * XXX see if we want the tos byte (can store in 'flags')
 */
static struct ipfw_flow_id *
flow_id_mask(struct ipfw_flow_id *mask, struct ipfw_flow_id *id)
{
    int is_v6 = IS_IP6_FLOW_ID(id);

    id->dst_port &= mask->dst_port;
    id->src_port &= mask->src_port;
    id->proto &= mask->proto;
    id->extra &= mask->extra;
    if (is_v6) {
        APPLY_MASK(&id->dst_ip6, &mask->dst_ip6);
        APPLY_MASK(&id->src_ip6, &mask->src_ip6);
        id->flow_id6 &= mask->flow_id6;
    } else {
        id->dst_ip &= mask->dst_ip;
        id->src_ip &= mask->src_ip;
    }
    return id;
}
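
/*
 * Example (illustrative): a mask whose src_ip is 0xffffff00 and whose
 * other fields are zero collapses all packets from one /24 onto the
 * same masked flow_id, hence onto the same queue below.
 */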
/* computes an OR of two masks, result in dst and also returned */
static struct ipfw_flow_id *
flow_id_or(struct ipfw_flow_id *src, struct ipfw_flow_id *dst)
{
    int is_v6 = IS_IP6_FLOW_ID(dst);

    dst->dst_port |= src->dst_port;
    dst->src_port |= src->src_port;
    dst->proto |= src->proto;
    dst->extra |= src->extra;
    if (is_v6) {
#define OR_MASK(_d, _s)                                                   \
    (_d)->__u6_addr.__u6_addr32[0] |= (_s)->__u6_addr.__u6_addr32[0]; \
    (_d)->__u6_addr.__u6_addr32[1] |= (_s)->__u6_addr.__u6_addr32[1]; \
    (_d)->__u6_addr.__u6_addr32[2] |= (_s)->__u6_addr.__u6_addr32[2]; \
    (_d)->__u6_addr.__u6_addr32[3] |= (_s)->__u6_addr.__u6_addr32[3];
        OR_MASK(&dst->dst_ip6, &src->dst_ip6);
        OR_MASK(&dst->src_ip6, &src->src_ip6);
#undef OR_MASK
        dst->flow_id6 |= src->flow_id6;
    } else {
        dst->dst_ip |= src->dst_ip;
        dst->src_ip |= src->src_ip;
    }
    return dst;
}

static int
nonzero_mask(struct ipfw_flow_id *m)
{
    if (m->dst_port || m->src_port || m->proto || m->extra)
        return 1;
    if (IS_IP6_FLOW_ID(m)) {
        return
            m->dst_ip6.__u6_addr.__u6_addr32[0] ||
            m->dst_ip6.__u6_addr.__u6_addr32[1] ||
            m->dst_ip6.__u6_addr.__u6_addr32[2] ||
            m->dst_ip6.__u6_addr.__u6_addr32[3] ||
            m->src_ip6.__u6_addr.__u6_addr32[0] ||
            m->src_ip6.__u6_addr.__u6_addr32[1] ||
            m->src_ip6.__u6_addr.__u6_addr32[2] ||
            m->src_ip6.__u6_addr.__u6_addr32[3] ||
            m->flow_id6;
    } else {
        return m->dst_ip || m->src_ip;
    }
}

/* XXX we may want a better hash function */
static uint32_t
flow_id_hash(struct ipfw_flow_id *id)
{
    uint32_t i;

    if (IS_IP6_FLOW_ID(id)) {
        uint32_t *d = (uint32_t *)&id->dst_ip6;
        uint32_t *s = (uint32_t *)&id->src_ip6;
        i = (d[0]      ) ^ (d[1]) ^
            (d[2]      ) ^ (d[3]) ^
            (d[0] >> 15) ^ (d[1] >> 15) ^
            (d[2] >> 15) ^ (d[3] >> 15) ^
            (s[0] <<  1) ^ (s[1] <<  1) ^
            (s[2] <<  1) ^ (s[3] <<  1) ^
            (s[0] << 16) ^ (s[1] << 16) ^
            (s[2] << 16) ^ (s[3] << 16) ^
            (id->dst_port << 1) ^ (id->src_port) ^
            (id->extra) ^
            (id->proto ) ^ (id->flow_id6);
    } else {
        i = (id->dst_ip)        ^ (id->dst_ip >> 15) ^
            (id->src_ip << 1)   ^ (id->src_ip >> 16) ^
            (id->extra) ^
            (id->dst_port << 1) ^ (id->src_port) ^ (id->proto);
    }
    return i;
}
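
/*
 * Note the deliberate asymmetry above: destination words are mixed in
 * with right shifts and source words with left shifts, so the two
 * directions of the same 5-tuple do not systematically collide.
 */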
/* Like bcmp, returns 0 if ids match, 1 otherwise. */
static int
flow_id_cmp(struct ipfw_flow_id *id1, struct ipfw_flow_id *id2)
{
    int is_v6 = IS_IP6_FLOW_ID(id1);

    if (!is_v6) {
        if (IS_IP6_FLOW_ID(id2))
            return 1; /* different address families */

        return (id1->dst_ip == id2->dst_ip &&
            id1->src_ip == id2->src_ip &&
            id1->dst_port == id2->dst_port &&
            id1->src_port == id2->src_port &&
            id1->proto == id2->proto &&
            id1->extra == id2->extra) ? 0 : 1;
    }
    /* the ipv6 case */
    return (
        !bcmp(&id1->dst_ip6, &id2->dst_ip6, sizeof(id1->dst_ip6)) &&
        !bcmp(&id1->src_ip6, &id2->src_ip6, sizeof(id1->src_ip6)) &&
        id1->dst_port == id2->dst_port &&
        id1->src_port == id2->src_port &&
        id1->proto == id2->proto &&
        id1->extra == id2->extra &&
        id1->flow_id6 == id2->flow_id6) ? 0 : 1;
}
/*--------- end of flow-id mask, hash and compare ---------*/

/*--- support functions for the qht hashtable ----
 * Entries are hashed by flow-id
 */
static uint32_t
q_hash(uintptr_t key, int flags, void *arg)
{
    /* compute the hash slot from the flow id */
    struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
        &((struct dn_queue *)key)->ni.fid :
        (struct ipfw_flow_id *)key;

    return flow_id_hash(id);
}

static int
q_match(void *obj, uintptr_t key, int flags, void *arg)
{
    struct dn_queue *o = (struct dn_queue *)obj;
    struct ipfw_flow_id *id2;

    if (flags & DNHT_KEY_IS_OBJ) {
        /* key is an object; compare against its flow-id */
        id2 = &((struct dn_queue *)key)->ni.fid;
    } else {
        id2 = (struct ipfw_flow_id *)key;
    }
    return (0 == flow_id_cmp(&o->ni.fid, id2));
}
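
/*
 * The hash/match callbacks take either a bare flow-id or a whole
 * object as 'key'; DNHT_KEY_IS_OBJ says which, so the same table can be
 * probed by key or by pointer. si_hash()/si_match() and
 * fsk_hash()/fsk_match() below follow the same convention.
 */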
/*
 * Create a new queue instance for the given 'key'.
 */
static void *
q_new(uintptr_t key, int flags, void *arg)
{
    struct dn_queue *q, *template = arg;
    struct dn_fsk *fs = template->fs;
    int size = sizeof(*q) + fs->sched->fp->q_datalen;

    q = malloc(size, M_DUMMYNET, M_NOWAIT | M_ZERO);
    if (q == NULL) {
        D("no memory for new queue");
        return NULL;
    }

    set_oid(&q->ni.oid, DN_QUEUE, size);
    if (fs->fs.flags & DN_QHT_HASH)
        q->ni.fid = *(struct ipfw_flow_id *)key;
    q->fs = fs;
    q->_si = template->_si;
    q->_si->q_count++;

    if (fs->sched->fp->new_queue)
        fs->sched->fp->new_queue(q);

    /* call AQM init function after creating a queue */
    if (fs->aqmfp && fs->aqmfp->init)
        if (fs->aqmfp->init(q))
            D("unable to init AQM for fs %d", fs->fs.fs_nr);

    dn_cfg.queue_count++;
    return q;
}

/*
 * Notify schedulers that a queue is going away.
 * If (flags & DN_DESTROY), also free the packets.
 * The version for callbacks is called q_delete_cb().
 */
static void
dn_delete_queue(struct dn_queue *q, int flags)
{
    struct dn_fsk *fs = q->fs;

    /*
     * clean up AQM status for queue 'q';
     * this cleanup is only reached for MULTIQUEUE schedulers
     */
    if (fs && fs->aqmfp && fs->aqmfp->cleanup)
        fs->aqmfp->cleanup(q);

    // D("fs %p si %p\n", fs, q->_si);
    /* notify the parent scheduler that the queue is going away */
    if (fs && fs->sched->fp->free_queue)
        fs->sched->fp->free_queue(q);
    q->_si->q_count--;
    q->_si = NULL;
    if (flags & DN_DESTROY) {
        if (q->mq.head)
            dn_free_pkts(q->mq.head);
        bzero(q, sizeof(*q));	// safety
        free(q, M_DUMMYNET);
        dn_cfg.queue_count--;
    }
}

static int
q_delete_cb(void *q, void *arg)
{
    int flags = (int)(uintptr_t)arg;

    dn_delete_queue(q, flags);
    return (flags & DN_DESTROY) ? DNHT_SCAN_DEL : 0;
}
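
/*
 * Returning DNHT_SCAN_DEL from a scan callback (as above when
 * DN_DESTROY is set) asks dn_ht_scan() to unlink the entry from the
 * table; the callback has already released the object itself.
 */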
/*
 * Calls dn_delete_queue/q_delete_cb on all queues,
 * which notifies the parent scheduler and possibly drains packets.
 * flags & DN_DESTROY: drain queues and destroy qht.
 */
static void
qht_delete(struct dn_fsk *fs, int flags)
{
    ND("fs %d start flags %d qht %p",
        fs->fs.fs_nr, flags, fs->qht);
    if (!fs->qht)
        return;
    if (fs->fs.flags & DN_QHT_HASH) {
        dn_ht_scan(fs->qht, q_delete_cb, (void *)(uintptr_t)flags);
        if (flags & DN_DESTROY) {
            dn_ht_free(fs->qht, 0);
            fs->qht = NULL;
        }
    } else {
        dn_delete_queue((struct dn_queue *)(fs->qht), flags);
        if (flags & DN_DESTROY)
            fs->qht = NULL;
    }
}

/*
 * Find and possibly create the queue for a MULTIQUEUE scheduler.
 * We never call it for !MULTIQUEUE (the queue is in the sch_inst).
 */
struct dn_queue *
ipdn_q_find(struct dn_fsk *fs, struct dn_sch_inst *si,
    struct ipfw_flow_id *id)
{
    struct dn_queue template;

    template._si = si;
    template.fs = fs;

    if (fs->fs.flags & DN_QHT_HASH) {
        struct ipfw_flow_id masked_id;
        if (fs->qht == NULL) {
            fs->qht = dn_ht_init(NULL, fs->fs.buckets,
                offsetof(struct dn_queue, q_next),
                q_hash, q_match, q_new);
            if (fs->qht == NULL)
                return NULL;
        }
        masked_id = *id;
        flow_id_mask(&fs->fsk_mask, &masked_id);
        return dn_ht_find(fs->qht, (uintptr_t)&masked_id,
            DNHT_INSERT, &template);
    } else {
        if (fs->qht == NULL)
            fs->qht = q_new(0, 0, &template);
        return (struct dn_queue *)fs->qht;
    }
}
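
/*
 * For a flowset without DN_QHT_HASH, fs->qht is thus not a hash table
 * at all but a single struct dn_queue *, created on first use; the flag
 * (set in fsk_attach() when the combined mask is non-zero) tells the
 * two layouts apart wherever fs->qht is dereferenced.
 */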
/*--- end of queue hash table ---*/

/*--- support functions for the sch_inst hashtable ----
 *
 * These are hashed by flow-id
 */
static uint32_t
si_hash(uintptr_t key, int flags, void *arg)
{
    /* compute the hash slot from the flow id */
    struct ipfw_flow_id *id = (flags & DNHT_KEY_IS_OBJ) ?
        &((struct dn_sch_inst *)key)->ni.fid :
        (struct ipfw_flow_id *)key;

    return flow_id_hash(id);
}

static int
si_match(void *obj, uintptr_t key, int flags, void *arg)
{
    struct dn_sch_inst *o = obj;
    struct ipfw_flow_id *id2;

    id2 = (flags & DNHT_KEY_IS_OBJ) ?
        &((struct dn_sch_inst *)key)->ni.fid :
        (struct ipfw_flow_id *)key;
    return flow_id_cmp(&o->ni.fid, id2) == 0;
}

/*
 * Create a new instance for the given 'key'.
 * Allocate memory for instance, delay line and scheduler private data.
 */
static void *
si_new(uintptr_t key, int flags, void *arg)
{
    struct dn_schk *s = arg;
    struct dn_sch_inst *si;
    int l = sizeof(*si) + s->fp->si_datalen;

    si = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
    if (si == NULL)
        goto error;

    /* Set length only for the part passed up to userland. */
    set_oid(&si->ni.oid, DN_SCH_I, sizeof(struct dn_flow));
    set_oid(&(si->dline.oid), DN_DELAY_LINE,
        sizeof(struct delay_line));
    /* mark si and dline as outside the event queue */
    si->ni.oid.id = si->dline.oid.id = -1;
    si->sched = s;
    si->dline.si = si;

    if (s->fp->new_sched && s->fp->new_sched(si)) {
        D("new_sched error");
        goto error;
    }
    if (s->sch.flags & DN_HAVE_MASK)
        si->ni.fid = *(struct ipfw_flow_id *)key;

    /* init AQM status for !DN_MULTIQUEUE sched */
    if (!(s->fp->flags & DN_MULTIQUEUE))
        if (s->fs->aqmfp && s->fs->aqmfp->init)
            if (s->fs->aqmfp->init((struct dn_queue *)(si + 1))) {
                D("unable to init AQM for fs %d", s->fs->fs.fs_nr);
                goto error;
            }

    dn_cfg.si_count++;
    return si;

error:
    if (si) {
        bzero(si, sizeof(*si)); // safety
        free(si, M_DUMMYNET);
    }
    return NULL;
}
/*
 * Callback from siht to delete all scheduler instances. Remove
 * si and delay line from the system heap, destroy all queues.
 * We assume that all flowsets have been notified and do not
 * point to us anymore.
 */
static int
si_destroy(void *_si, void *arg)
{
    struct dn_sch_inst *si = _si;
    struct dn_schk *s = si->sched;
    struct delay_line *dl = &si->dline;

    if (dl->oid.subtype)	/* remove delay line from event heap */
        heap_extract(&dn_cfg.evheap, dl);
    dn_free_pkts(dl->mq.head);	/* drain delay line */
    if (si->kflags & DN_ACTIVE)	/* remove si from event heap */
        heap_extract(&dn_cfg.evheap, si);

    /*
     * clean up AQM status for a !DN_MULTIQUEUE sched.
     * Note that all queues belonging to this fs were already cleaned
     * up in fsk_detach. When drain_scheduler is called, s->fs and
     * q->fs still point to a valid fs, so we can use fs here.
     */
    if (!(s->fp->flags & DN_MULTIQUEUE)) {
        struct dn_queue *q = (struct dn_queue *)(si + 1);
        if (q->aqm_status && q->fs->aqmfp)
            if (q->fs->aqmfp->cleanup)
                q->fs->aqmfp->cleanup(q);
    }

    if (s->fp->free_sched)
        s->fp->free_sched(si);
    bzero(si, sizeof(*si));	/* safety */
    free(si, M_DUMMYNET);
    dn_cfg.si_count--;
    return DNHT_SCAN_DEL;
}
/*
 * Find the scheduler instance for this packet. If we need to apply
 * a mask, do it on a local copy of the flow_id to preserve the original.
 * Assume siht is always initialized if we have a mask.
 */
struct dn_sch_inst *
ipdn_si_find(struct dn_schk *s, struct ipfw_flow_id *id)
{
    if (s->sch.flags & DN_HAVE_MASK) {
        struct ipfw_flow_id id_t = *id;
        flow_id_mask(&s->sch.sched_mask, &id_t);
        return dn_ht_find(s->siht, (uintptr_t)&id_t,
            DNHT_INSERT, s);
    }
    if (!s->siht)
        s->siht = si_new(0, 0, s);
    return (struct dn_sch_inst *)s->siht;
}

/* callback to flush credit for the scheduler instance */
static int
si_reset_credit(void *_si, void *arg)
{
    struct dn_sch_inst *si = _si;
    struct dn_link *p = &si->sched->link;

    si->credit = p->burst + (dn_cfg.io_fast ? p->bandwidth : 0);
    return 0;
}

static void
schk_reset_credit(struct dn_schk *s)
{
    if (s->sch.flags & DN_HAVE_MASK)
        dn_ht_scan(s->siht, si_reset_credit, NULL);
    else if (s->siht)
        si_reset_credit(s->siht, NULL);
}
/*---- end of sch_inst hashtable ---------------------*/

/*-------------------------------------------------------
 * flowset hash (fshash) support. Entries are hashed by fs_nr.
 * New allocations are put in the fsunlinked list, from which
 * they are removed when they point to a specific scheduler.
 */
static uint32_t
fsk_hash(uintptr_t key, int flags, void *arg)
{
    uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key :
        ((struct dn_fsk *)key)->fs.fs_nr;

    return ( (i>>8)^(i>>4)^i );
}
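
/*
 * fs_nr values are small consecutive integers, so folding the number
 * onto itself with two shifts spreads neighbouring ids across buckets
 * cheaply. schk_hash() below uses the same function for sched_nr.
 */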
static int
fsk_match(void *obj, uintptr_t key, int flags, void *arg)
{
    struct dn_fsk *fs = obj;
    int i = !(flags & DNHT_KEY_IS_OBJ) ? key :
        ((struct dn_fsk *)key)->fs.fs_nr;

    return (fs->fs.fs_nr == i);
}

static void *
fsk_new(uintptr_t key, int flags, void *arg)
{
    struct dn_fsk *fs;

    fs = malloc(sizeof(*fs), M_DUMMYNET, M_NOWAIT | M_ZERO);
    if (fs) {
        set_oid(&fs->fs.oid, DN_FS, sizeof(fs->fs));
        dn_cfg.fsk_count++;
        fs->drain_bucket = 0;
        SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain);
    }
    return fs;
}

/*
 * Callback for cleaning up the AQM queue status belonging to a flowset
 * connected to scheduler instance '_si' (for !DN_MULTIQUEUE only).
 */
static int
si_cleanup_q(void *_si, void *arg)
{
    struct dn_sch_inst *si = _si;

    if (!(si->sched->fp->flags & DN_MULTIQUEUE)) {
        if (si->sched->fs->aqmfp && si->sched->fs->aqmfp->cleanup)
            si->sched->fs->aqmfp->cleanup((struct dn_queue *)(si + 1));
    }
    return 0;
}

/* callback to clean up queue AQM status. */
static int
q_cleanup_q(void *_q, void *arg)
{
    struct dn_queue *q = _q;

    q->fs->aqmfp->cleanup(q);
    return 0;
}
/*
 * Clean up the AQM status of all queues belonging to flowset 'fs',
 * then deconfigure AQM for 'fs'.
 */
static void
aqm_cleanup_deconfig_fs(struct dn_fsk *fs)
{
    struct dn_sch_inst *si;

    /* clean up AQM status for all queues of a !DN_MULTIQUEUE sched */
    if (fs->fs.fs_nr > DN_MAX_ID) {
        if (fs->sched && !(fs->sched->fp->flags & DN_MULTIQUEUE)) {
            if (fs->sched->sch.flags & DN_HAVE_MASK)
                dn_ht_scan(fs->sched->siht, si_cleanup_q, NULL);
            else {
                /* single si, i.e. no sched mask */
                si = (struct dn_sch_inst *)fs->sched->siht;
                if (si && fs->aqmfp && fs->aqmfp->cleanup)
                    fs->aqmfp->cleanup((struct dn_queue *)(si + 1));
            }
        }
    }

    /* clean up AQM status for all queues of a DN_MULTIQUEUE sched */
    if (fs->sched && fs->sched->fp->flags & DN_MULTIQUEUE && fs->qht) {
        if (fs->fs.flags & DN_QHT_HASH)
            dn_ht_scan(fs->qht, q_cleanup_q, NULL);
        else
            fs->aqmfp->cleanup((struct dn_queue *)(fs->qht));
    }

    /* deconfigure AQM */
    if (fs->aqmcfg && fs->aqmfp && fs->aqmfp->deconfig)
        fs->aqmfp->deconfig(fs);
}
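
/*
 * Teardown is two-step: aqmfp->cleanup() releases the per-queue state
 * (q->aqm_status) for each queue found above, while aqmfp->deconfig()
 * releases the per-flowset configuration (fs->aqmcfg). The same pair
 * runs at module unload time via fs_cleanup() near the end of this
 * file.
 */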
/*
 * Detach flowset from its current scheduler. Flags as follows:
 *   DN_DETACH removes from the fsk_list
 *   DN_DESTROY deletes individual queues
 *   DN_DELETE_FS destroys the flowset (otherwise it goes back on the
 *	unlinked list).
 */
static void
fsk_detach(struct dn_fsk *fs, int flags)
{
    if (flags & DN_DELETE_FS)
        flags |= DN_DESTROY;
    ND("fs %d from sched %d flags %s %s %s",
        fs->fs.fs_nr, fs->fs.sched_nr,
        (flags & DN_DELETE_FS) ? "DEL_FS":"",
        (flags & DN_DESTROY) ? "DEL":"",
        (flags & DN_DETACH) ? "DET":"");
    if (flags & DN_DETACH) { /* detach from the list */
        struct dn_fsk_head *h;
        h = fs->sched ? &fs->sched->fsk_list : &dn_cfg.fsu;
        SLIST_REMOVE(h, fs, dn_fsk, sch_chain);
    }
    /*
     * Free the RED parameters, they will be recomputed on
     * subsequent attach if needed.
     */
    if (fs->w_q_lookup)
        free(fs->w_q_lookup, M_DUMMYNET);
    fs->w_q_lookup = NULL;
    qht_delete(fs, flags);
    aqm_cleanup_deconfig_fs(fs);
    if (fs->sched && fs->sched->fp->free_fsk)
        fs->sched->fp->free_fsk(fs);
    fs->sched = NULL;
    if (flags & DN_DELETE_FS) {
        bzero(fs, sizeof(*fs));	/* safety */
        free(fs, M_DUMMYNET);
        dn_cfg.fsk_count--;
    } else
        SLIST_INSERT_HEAD(&dn_cfg.fsu, fs, sch_chain);
}

/*
 * Detach or destroy all flowsets in a list.
 * flags specifies what to do:
 *   DN_DESTROY:	flush all queues
 *   DN_DELETE_FS:	DN_DESTROY + destroy flowset
 *	(DN_DELETE_FS implies DN_DESTROY)
 */
static void
fsk_detach_list(struct dn_fsk_head *h, int flags)
{
    struct dn_fsk *fs;
    int n = 0; /* only for stats */

    ND("head %p flags %x", h, flags);
    while ((fs = SLIST_FIRST(h))) {
        SLIST_REMOVE_HEAD(h, sch_chain);
        n++;
        fsk_detach(fs, flags);
    }
    ND("done %d flowsets", n);
}

/*
 * Called on 'queue X delete' -- removes the flowset from fshash,
 * deletes all queues for the flowset, and removes the flowset.
 */
static int
delete_fs(int i, int locked)
{
    struct dn_fsk *fs;
    int err = 0;

    if (!locked)
        DN_BH_WLOCK();
    fs = dn_ht_find(dn_cfg.fshash, i, DNHT_REMOVE, NULL);
    ND("fs %d found %p", i, fs);
    if (fs)
        fsk_detach(fs, DN_DETACH | DN_DELETE_FS);
    else
        err = EINVAL;
    if (!locked)
        DN_BH_WUNLOCK();
    return err;
}

/*----- end of flowset hashtable support -------------*/
/*------------------------------------------------------------
 * Scheduler hash. When searching by index we pass sched_nr,
 * otherwise we pass struct dn_sch * which is the first field in
 * struct dn_schk, so we can cast between the two. We use this trick
 * during the create phase (it should be fixed).
 */
static uint32_t
schk_hash(uintptr_t key, int flags, void *_arg)
{
    uint32_t i = !(flags & DNHT_KEY_IS_OBJ) ? key :
        ((struct dn_schk *)key)->sch.sched_nr;
    return ( (i>>8)^(i>>4)^i );
}

static int
schk_match(void *obj, uintptr_t key, int flags, void *_arg)
{
    struct dn_schk *s = (struct dn_schk *)obj;
    int i = !(flags & DNHT_KEY_IS_OBJ) ? key :
        ((struct dn_schk *)key)->sch.sched_nr;
    return (s->sch.sched_nr == i);
}

/*
 * Create the entry and initialize it with the sched hash if needed.
 * Leave s->fp unset so we can tell whether a dn_ht_find() returns
 * a new object or a previously existing one.
 */
static void *
schk_new(uintptr_t key, int flags, void *arg)
{
    struct schk_new_arg *a = arg;
    struct dn_schk *s;
    int l = sizeof(*s) + a->fp->schk_datalen;

    s = malloc(l, M_DUMMYNET, M_NOWAIT | M_ZERO);
    if (s == NULL)
        return NULL;
    set_oid(&s->link.oid, DN_LINK, sizeof(s->link));
    s->sch = *a->sch; // copy initial values
    s->link.link_nr = s->sch.sched_nr;
    SLIST_INIT(&s->fsk_list);
    /* initialize the hash table or create the single instance */
    s->fp = a->fp;	/* si_new needs this */
    if (s->sch.flags & DN_HAVE_MASK) {
        s->siht = dn_ht_init(NULL, s->sch.buckets,
            offsetof(struct dn_sch_inst, si_next),
            si_hash, si_match, si_new);
        if (s->siht == NULL) {
            free(s, M_DUMMYNET);
            return NULL;
        }
    }
    s->fp = NULL;	/* mark as a new scheduler */
    dn_cfg.schk_count++;
    return s;
}
/*
 * Callback for sched delete. Notify all attached flowsets to
 * detach from the scheduler, destroy the internal flowset, and
 * all instances. The scheduler goes away too.
 * arg is 0 (only detach flowsets and destroy instances),
 * DN_DESTROY (detach & delete queues, delete schk)
 * or DN_DELETE_FS (delete queues and flowsets, delete schk)
 */
static int
schk_delete_cb(void *obj, void *arg)
{
    struct dn_schk *s = obj;
    int a = (int)(uintptr_t)arg;

    (void)a;	/* only used by ND() */
    ND("sched %d arg %s%s",
        s->sch.sched_nr,
        a&DN_DESTROY ? "DEL ":"",
        a&DN_DELETE_FS ? "DEL_FS":"");
    fsk_detach_list(&s->fsk_list, arg ? DN_DESTROY : 0);
    /* no more flowsets pointing to us now */
    if (s->sch.flags & DN_HAVE_MASK) {
        dn_ht_scan(s->siht, si_destroy, NULL);
        dn_ht_free(s->siht, 0);
    } else if (s->siht)
        si_destroy(s->siht, NULL);
    if (s->profile) {
        free(s->profile, M_DUMMYNET);
        s->profile = NULL;
    }
    s->siht = NULL;
    if (s->fp->destroy)
        s->fp->destroy(s);
    bzero(s, sizeof(*s));	// safety
    free(obj, M_DUMMYNET);
    dn_cfg.schk_count--;
    return DNHT_SCAN_DEL;
}

/*
 * Called on a 'sched X delete' command. Deletes a single scheduler.
 * This is done by removing from the schedhash, unlinking all
 * flowsets and deleting their traffic.
 */
static int
delete_schk(int i)
{
    struct dn_schk *s;

    s = dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
    ND("%d %p", i, s);
    if (!s)
        return EINVAL;
    delete_fs(i + DN_MAX_ID, 1); /* first delete internal fs */
    /* then detach flowsets, delete traffic */
    schk_delete_cb(s, (void *)(uintptr_t)DN_DESTROY);
    return 0;
}
/*--- end of schk hashtable support ---*/

static int
copy_obj(char **start, char *end, void *_o, const char *msg, int i)
{
    struct dn_id *o = _o;
    int have = end - *start;

    if (have < o->len || o->len == 0 || o->type == 0) {
        D("(WARN) type %d %s %d have %d need %d",
            o->type, msg, i, have, o->len);
        return 1;
    }
    ND("type %d %s %d len %d", o->type, msg, i, o->len);
    bcopy(_o, *start, o->len);
    if (o->type == DN_LINK) {
        /* Adjust burst parameter for link */
        struct dn_link *l = (struct dn_link *)*start;
        l->burst = div64(l->burst, 8 * hz);
        l->delay = l->delay * 1000 / hz;
    } else if (o->type == DN_SCH) {
        /* Set id->id to the number of instances */
        struct dn_schk *s = _o;
        struct dn_id *id = (struct dn_id *)(*start);
        id->id = (s->sch.flags & DN_HAVE_MASK) ?
            dn_ht_entries(s->siht) : (s->siht ? 1 : 0);
    }
    *start += o->len;
    return 0;
}

/*
 * Specific function to copy a queue.
 * Copies only the user-visible part of a queue (which is in
 * a struct dn_flow), and sets len accordingly.
 */
static int
copy_obj_q(char **start, char *end, void *_o, const char *msg, int i)
{
    struct dn_id *o = _o;
    int have = end - *start;
    int len = sizeof(struct dn_flow); /* see above comment */

    if (have < len || o->len == 0 || o->type != DN_QUEUE) {
        D("ERROR type %d %s %d have %d need %d",
            o->type, msg, i, have, len);
        return 1;
    }
    ND("type %d %s %d len %d", o->type, msg, i, len);
    bcopy(_o, *start, len);
    ((struct dn_id *)(*start))->len = len;
    *start += len;
    return 0;
}

static int
copy_q_cb(void *obj, void *arg)
{
    struct dn_queue *q = obj;
    struct copy_args *a = arg;
    struct dn_flow *ni = (struct dn_flow *)(*a->start);

    if (copy_obj_q(a->start, a->end, &q->ni, "queue", -1))
        return DNHT_SCAN_END;
    ni->oid.type = DN_FLOW; /* override the DN_QUEUE */
    ni->oid.id = si_hash((uintptr_t)&ni->fid, 0, NULL);
    return 0;
}

static int
copy_q(struct copy_args *a, struct dn_fsk *fs, int flags)
{
    if (!fs->qht)
        return 0;
    if (fs->fs.flags & DN_QHT_HASH)
        dn_ht_scan(fs->qht, copy_q_cb, a);
    else
        copy_q_cb(fs->qht, a);
    return 0;
}
/*
 * This routine only copies the initial part of a profile ? XXX
 */
static int
copy_profile(struct copy_args *a, struct dn_profile *p)
{
    int have = a->end - *a->start;
    /* XXX here we check for max length */
    int profile_len = sizeof(struct dn_profile) -
        ED_MAX_SAMPLES_NO*sizeof(int);

    if (p == NULL)
        return 0;
    if (have < profile_len) {
        D("error have %d need %d", have, profile_len);
        return 1;
    }
    bcopy(p, *a->start, profile_len);
    ((struct dn_id *)(*a->start))->len = profile_len;
    *a->start += profile_len;
    return 0;
}

static int
copy_flowset(struct copy_args *a, struct dn_fsk *fs, int flags)
{
    struct dn_fs *ufs = (struct dn_fs *)(*a->start);

    if (!fs)
        return 0;
    ND("flowset %d", fs->fs.fs_nr);
    if (copy_obj(a->start, a->end, &fs->fs, "flowset", fs->fs.fs_nr))
        return DNHT_SCAN_END;
    ufs->oid.id = (fs->fs.flags & DN_QHT_HASH) ?
        dn_ht_entries(fs->qht) : (fs->qht ? 1 : 0);
    if (flags) {	/* copy queues */
        copy_q(a, fs, 0);
    }
    return 0;
}

static int
copy_si_cb(void *obj, void *arg)
{
    struct dn_sch_inst *si = obj;
    struct copy_args *a = arg;
    struct dn_flow *ni = (struct dn_flow *)(*a->start);

    if (copy_obj(a->start, a->end, &si->ni, "inst",
            si->sched->sch.sched_nr))
        return DNHT_SCAN_END;
    ni->oid.type = DN_FLOW; /* override the DN_SCH_I */
    ni->oid.id = si_hash((uintptr_t)si, DNHT_KEY_IS_OBJ, NULL);
    return 0;
}

static int
copy_si(struct copy_args *a, struct dn_schk *s, int flags)
{
    if (s->sch.flags & DN_HAVE_MASK)
        dn_ht_scan(s->siht, copy_si_cb, a);
    else if (s->siht)
        copy_si_cb(s->siht, a);
    return 0;
}
/*
 * Compute a list of children of a scheduler and copy it up.
 */
static int
copy_fsk_list(struct copy_args *a, struct dn_schk *s, int flags)
{
    struct dn_fsk *fs;
    struct dn_id *o;
    uint32_t *p;
    int n = 0, space = sizeof(*o);

    SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
        if (fs->fs.fs_nr < DN_MAX_ID)
            n++;
    }
    space += n * sizeof(uint32_t);
    DX(3, "sched %d has %d flowsets", s->sch.sched_nr, n);
    if (a->end - *(a->start) < space)
        return DNHT_SCAN_END;
    o = (struct dn_id *)(*(a->start));
    o->len = space;
    *a->start += o->len;
    o->type = DN_TEXT;
    p = (uint32_t *)(o+1);
    SLIST_FOREACH(fs, &s->fsk_list, sch_chain)
        if (fs->fs.fs_nr < DN_MAX_ID)
            *p++ = fs->fs.fs_nr;
    return 0;
}
static int
copy_data_helper(void *_o, void *_arg)
{
    struct copy_args *a = _arg;
    uint32_t *r = a->extra->r; /* start of first range */
    uint32_t *lim;	/* first invalid pointer */
    int n;

    lim = (uint32_t *)((char *)(a->extra) + a->extra->o.len);

    if (a->type == DN_LINK || a->type == DN_SCH) {
        /* pipe|sched show, we receive a dn_schk */
        struct dn_schk *s = _o;

        n = s->sch.sched_nr;
        if (a->type == DN_SCH && n >= DN_MAX_ID)
            return 0;	/* not a scheduler */
        if (a->type == DN_LINK && n <= DN_MAX_ID)
            return 0;	/* not a pipe */

        /* see if the object is within one of our ranges */
        for (; r < lim; r += 2) {
            if (n < r[0] || n > r[1])
                continue;
            /* Found a valid entry, copy and we are done */
            if (a->flags & DN_C_LINK) {
                if (copy_obj(a->start, a->end,
                        &s->link, "link", n))
                    return DNHT_SCAN_END;
                if (copy_profile(a, s->profile))
                    return DNHT_SCAN_END;
                if (copy_flowset(a, s->fs, 0))
                    return DNHT_SCAN_END;
            }
            if (a->flags & DN_C_SCH) {
                if (copy_obj(a->start, a->end,
                        &s->sch, "sched", n))
                    return DNHT_SCAN_END;
                /* list all attached flowsets */
                if (copy_fsk_list(a, s, 0))
                    return DNHT_SCAN_END;
            }
            if (a->flags & DN_C_FLOW)
                copy_si(a, s, 1);
            break;
        }
    } else if (a->type == DN_FS) {
        /* queue show, skip internal flowsets */
        struct dn_fsk *fs = _o;

        n = fs->fs.fs_nr;
        if (n >= DN_MAX_ID)
            return 0;
        /* see if the object is within one of our ranges */
        for (; r < lim; r += 2) {
            if (n < r[0] || n > r[1])
                continue;
            if (copy_flowset(a, fs, 0))
                return DNHT_SCAN_END;
            copy_q(a, fs, 0);
            break; /* we are done */
        }
    }
    return 0;
}

static inline struct dn_schk *
locate_scheduler(int i)
{
    return dn_ht_find(dn_cfg.schedhash, i, 0, NULL);
}
/*
 * red parameters are in fixed point arithmetic.
 */
static int
config_red(struct dn_fsk *fs)
{
    int64_t s, idle, weight, w0;
    int t, i;

    fs->w_q = fs->fs.w_q;
    fs->max_p = fs->fs.max_p;

    /* Doing stuff that was in userland */
    i = fs->sched->link.bandwidth;
    s = (i <= 0) ? 0 :
        hz * dn_cfg.red_avg_pkt_size * 8 * SCALE(1) / i;

    idle = div64((s * 3) , fs->w_q); /* s, fs->w_q scaled; idle not scaled */
    fs->lookup_step = div64(idle , dn_cfg.red_lookup_depth);
    /* fs->lookup_step not scaled */
    if (!fs->lookup_step)
        fs->lookup_step = 1;
    w0 = weight = SCALE(1) - fs->w_q; // fs->w_q scaled

    for (t = fs->lookup_step; t > 1; --t)
        weight = SCALE_MUL(weight, w0);
    fs->lookup_weight = (int)(weight); // scaled

    /* Now doing stuff that was in kerneland */
    fs->min_th = SCALE(fs->fs.min_th);
    fs->max_th = SCALE(fs->fs.max_th);

    if (fs->fs.max_th == fs->fs.min_th)
        fs->c_1 = fs->max_p;
    else
        fs->c_1 = SCALE((int64_t)(fs->max_p)) / (fs->fs.max_th - fs->fs.min_th);
    fs->c_2 = SCALE_MUL(fs->c_1, SCALE(fs->fs.min_th));

    if (fs->fs.flags & DN_IS_GENTLE_RED) {
        fs->c_3 = (SCALE(1) - fs->max_p) / fs->fs.max_th;
        fs->c_4 = SCALE(1) - 2 * fs->max_p;
    }

    /* If the lookup table already exists, free it and recreate it. */
    if (fs->w_q_lookup) {
        free(fs->w_q_lookup, M_DUMMYNET);
        fs->w_q_lookup = NULL;
    }
    if (dn_cfg.red_lookup_depth == 0) {
        printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth"
            " must be > 0\n");
        fs->fs.flags &= ~DN_IS_RED;
        fs->fs.flags &= ~DN_IS_GENTLE_RED;
        return (EINVAL);
    }
    fs->lookup_depth = dn_cfg.red_lookup_depth;
    fs->w_q_lookup = (u_int *)malloc(fs->lookup_depth * sizeof(int),
        M_DUMMYNET, M_NOWAIT);
    if (fs->w_q_lookup == NULL) {
        printf("dummynet: sorry, cannot allocate red lookup table\n");
        fs->fs.flags &= ~DN_IS_RED;
        fs->fs.flags &= ~DN_IS_GENTLE_RED;
        return (ENOSPC);
    }

    /* Fill the lookup table with (1 - w_q)^x */
    fs->w_q_lookup[0] = SCALE(1) - fs->w_q;

    for (i = 1; i < fs->lookup_depth; i++)
        fs->w_q_lookup[i] =
            SCALE_MUL(fs->w_q_lookup[i - 1], fs->lookup_weight);

    if (dn_cfg.red_avg_pkt_size < 1)
        dn_cfg.red_avg_pkt_size = 512;
    fs->avg_pkt_size = dn_cfg.red_avg_pkt_size;
    if (dn_cfg.red_max_pkt_size < 1)
        dn_cfg.red_max_pkt_size = 1500;
    fs->max_pkt_size = dn_cfg.red_max_pkt_size;
    return 0;
}
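
/*
 * The table above lets the I/O path fold an idle period into the RED
 * EWMA cheaply: after t idle ticks the average should decay by
 * (1 - w_q)^t, which is approximated by w_q_lookup[t / lookup_step],
 * one table read instead of t fixed-point multiplications. (This is
 * the classic RED idle-time estimate; the consumer of the table lives
 * in the I/O path, not in this file.)
 */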
/* Scan all flowsets attached to this scheduler and update red */
static void
update_red(struct dn_schk *s)
{
    struct dn_fsk *fs;

    SLIST_FOREACH(fs, &s->fsk_list, sch_chain) {
        if (fs && (fs->fs.flags & DN_IS_RED))
            config_red(fs);
    }
}

/* attach flowset to scheduler s, possibly requeue */
static void
fsk_attach(struct dn_fsk *fs, struct dn_schk *s)
{
    ND("remove fs %d from fsunlinked, link to sched %d",
        fs->fs.fs_nr, s->sch.sched_nr);
    SLIST_REMOVE(&dn_cfg.fsu, fs, dn_fsk, sch_chain);
    fs->sched = s;
    SLIST_INSERT_HEAD(&s->fsk_list, fs, sch_chain);
    if (s->fp->new_fsk)
        s->fp->new_fsk(fs);
    /* XXX compute fsk_mask */
    fs->fsk_mask = fs->fs.flow_mask;
    if (fs->sched->sch.flags & DN_HAVE_MASK)
        flow_id_or(&fs->sched->sch.sched_mask, &fs->fsk_mask);
    if (fs->qht) {
        /*
         * we must drain qht according to the old
         * type, and reinsert according to the new one.
         * The requeue is complex -- in general we need to
         * reclassify every single packet.
         * For the time being, let's hope qht is never set
         * when we reach this point.
         */
        D("XXX TODO requeue from fs %d to sch %d",
            fs->fs.fs_nr, s->sch.sched_nr);
    }
    /* set the new type for qht */
    if (nonzero_mask(&fs->fsk_mask))
        fs->fs.flags |= DN_QHT_HASH;
    else
        fs->fs.flags &= ~DN_QHT_HASH;

    /* XXX config_red() can fail... */
    if (fs->fs.flags & DN_IS_RED)
        config_red(fs);	/* XXX should check errors */
}

/* update all flowsets which may refer to this scheduler */
static void
update_fs(struct dn_schk *s)
{
    struct dn_fsk *fs, *tmp;

    SLIST_FOREACH_SAFE(fs, &dn_cfg.fsu, sch_chain, tmp) {
        if (s->sch.sched_nr != fs->fs.sched_nr) {
            D("fs %d for sch %d not %d still unlinked",
                fs->fs.fs_nr, fs->fs.sched_nr,
                s->sch.sched_nr);
            continue;
        }
        fsk_attach(fs, s);
    }
}
/*
 * Retrieve AQM configuration parameters and return them to
 * ipfw userland.
 */
static int
get_aqm_parms(struct sockopt *sopt)
{
    struct dn_extra_parms *ep;
    struct dn_fsk *fs;
    size_t sopt_valsize;
    int l, err = 0;

    sopt_valsize = sopt->sopt_valsize;
    l = sizeof(*ep);
    if (sopt->sopt_valsize < l) {
        D("bad len sopt->sopt_valsize %d len %d",
            (int) sopt->sopt_valsize , l);
        return EINVAL;
    }
    ep = malloc(l, M_DUMMYNET, M_WAITOK);

    err = sooptcopyin(sopt, ep, l, l);
    if (err)
        goto out;
    sopt->sopt_valsize = sopt_valsize;
    if (ep->oid.len < l) {
        err = EINVAL;
        goto out;
    }

    fs = dn_ht_find(dn_cfg.fshash, ep->nr, 0, NULL);
    if (!fs) {
        D("fs %d not found", ep->nr);
        err = EINVAL;
        goto out;
    }

    if (fs->aqmfp && fs->aqmfp->getconfig) {
        if (fs->aqmfp->getconfig(fs, ep)) {
            D("Error while trying to get AQM params");
            err = EINVAL;
            goto out;
        }
        ep->oid.len = l;
        err = sooptcopyout(sopt, ep, l);
    }
out:
    free(ep, M_DUMMYNET);
    return err;
}
/*
 * Retrieve scheduler configuration parameters and return them to
 * ipfw userland.
 */
static int
get_sched_parms(struct sockopt *sopt)
{
    struct dn_extra_parms *ep;
    struct dn_schk *schk;
    size_t sopt_valsize;
    int l, err = 0;

    sopt_valsize = sopt->sopt_valsize;
    l = sizeof(*ep);
    if (sopt->sopt_valsize < l) {
        D("bad len sopt->sopt_valsize %d len %d",
            (int) sopt->sopt_valsize , l);
        return EINVAL;
    }
    ep = malloc(l, M_DUMMYNET, M_WAITOK);

    err = sooptcopyin(sopt, ep, l, l);
    if (err)
        goto out;
    sopt->sopt_valsize = sopt_valsize;
    if (ep->oid.len < l) {
        err = EINVAL;
        goto out;
    }

    schk = locate_scheduler(ep->nr);
    if (!schk) {
        D("sched %d not found", ep->nr);
        err = EINVAL;
        goto out;
    }

    if (schk->fp && schk->fp->getconfig) {
        if (schk->fp->getconfig(schk, ep)) {
            D("Error while trying to get sched params");
            err = EINVAL;
            goto out;
        }
        ep->oid.len = l;
        err = sooptcopyout(sopt, ep, l);
    }
out:
    free(ep, M_DUMMYNET);
    return err;
}
/*
 * Configure AQM for flowset 'fs'.
 * extra parameters are passed from userland.
 */
static int
config_aqm(struct dn_fsk *fs, struct dn_extra_parms *ep, int busy)
{
    int err = 0;

    if (!ep)	/* no configurations */
        return 0;

    /* no AQM for this flowset */
    if (!strcmp(ep->name,"")) {
        fs->fs.flags &= ~DN_IS_AQM;
        return 0;
    }
    if (ep->oid.len < sizeof(*ep)) {
        D("short aqm len %d", ep->oid.len);
        return EINVAL;
    }
    if (busy) {
        D("Unable to configure flowset, flowset busy!");
        return EINVAL;
    }

    /* deconfigure the old aqm if it exists */
    if (fs->aqmcfg && fs->aqmfp && fs->aqmfp->deconfig) {
        aqm_cleanup_deconfig_fs(fs);
    }

    if (!(fs->aqmfp = find_aqm_type(0, ep->name))) {
        D("AQM functions not found for type %s!", ep->name);
        fs->fs.flags &= ~DN_IS_AQM;
        return EINVAL;
    }
    fs->fs.flags |= DN_IS_AQM;

    if (ep->oid.subtype != DN_AQM_PARAMS) {
        fs->fs.flags &= ~DN_IS_AQM;
        return EINVAL;
    }

    if (fs->aqmfp->config) {
        err = fs->aqmfp->config(fs, ep, ep->oid.len);
        if (err) {
            D("Unable to configure AQM for FS %d", fs->fs.fs_nr);
            fs->fs.flags &= ~DN_IS_AQM;
            fs->aqmfp = NULL;
        }
    }
    return err;
}
/*
 * Configuration -- to preserve backward compatibility we use
 * the following scheme (N is 65536)
 *	NUMBER		SCHED	LINK	FLOWSET
 *	   1 ..  N-1	(1)WFQ	(2)WFQ	(3)queue
 *	 N+1 .. 2N-1	(4)FIFO	(5)FIFO	(6)FIFO for sched 1..N-1
 *	2N+1 .. 3N-1	--	--	(7)FIFO for sched N+1..2N-1
 *
 * "pipe i config" configures #1, #2 and #3
 * "sched i config" configures #1 and possibly #6
 * "queue i config" configures #3
 * #1 is configured with 'pipe i config' or 'sched i config'
 * #2 is configured with 'pipe i config', and created if not
 *	existing with 'sched i config'
 * #3 is configured with 'queue i config'
 * #4 is automatically configured after #1, can only be FIFO
 * #5 is automatically configured after #2
 * #6 is automatically created when #1 is !MULTIQUEUE,
 *	and can be updated.
 * #7 is automatically configured after #2
 */
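
/*
 * Worked example (N = DN_MAX_ID = 65536): "pipe 5 config" touches
 * scheduler 5, link 5 and its queue/flowset slot; the FIFO twins live
 * at 5 + N. This is why config_sched() below creates the internal
 * flowset with fs_nr = i + DN_MAX_ID and delete_schk() deletes it the
 * same way.
 */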
/*
 * Configure a link (and its FIFO instance).
 */
static int
config_link(struct dn_link *p, struct dn_id *arg)
{
    int i;

    if (p->oid.len != sizeof(*p)) {
        D("invalid pipe len %d", p->oid.len);
        return EINVAL;
    }
    i = p->link_nr;
    if (i <= 0 || i >= DN_MAX_ID)
        return EINVAL;
    /*
     * The config program passes parameters as follows:
     * bw = bits/second (0 means no limits),
     * delay = ms, must be translated into ticks.
     * qsize = slots/bytes
     */
    p->delay = (p->delay * hz) / 1000;
    /* Scale burst size: bytes -> bits * hz */
    p->burst *= 8 * hz;

    DN_BH_WLOCK();
    /* do it twice, base link and FIFO link */
    for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
        struct dn_schk *s = locate_scheduler(i);
        if (s == NULL) {
            DN_BH_WUNLOCK();
            D("sched %d not found", i);
            return EINVAL;
        }
        /* remove profile if it exists */
        if (s->profile) {
            free(s->profile, M_DUMMYNET);
            s->profile = NULL;
        }
        /* copy all parameters */
        s->link.oid = p->oid;
        s->link.link_nr = i;
        s->link.delay = p->delay;
        if (s->link.bandwidth != p->bandwidth) {
            /* XXX bandwidth changes, need to update red params */
            s->link.bandwidth = p->bandwidth;
            update_red(s);
        }
        s->link.burst = p->burst;
        schk_reset_credit(s);
    }
    DN_BH_WUNLOCK();
    return 0;
}
/*
 * Configure a flowset. Can be called from inside with locked=1.
 */
static struct dn_fsk *
config_fs(struct dn_fs *nfs, struct dn_id *arg, int locked)
{
    int i;
    struct dn_fsk *fs = NULL;

    if (nfs->oid.len != sizeof(*nfs)) {
        D("invalid flowset len %d", nfs->oid.len);
        return NULL;
    }
    i = nfs->fs_nr;
    if (i <= 0 || i >= 3*DN_MAX_ID)
        return NULL;
    ND("flowset %d", i);
    /* XXX other sanity checks */
    if (nfs->flags & DN_QSIZE_BYTES) {
        ipdn_bound_var(&nfs->qsize, 16384,
            1500, dn_cfg.byte_limit, NULL); // "queue byte size");
    } else {
        ipdn_bound_var(&nfs->qsize, 50,
            1, dn_cfg.slot_limit, NULL); // "queue slot size");
    }
    if (nfs->flags & DN_HAVE_MASK) {
        /* make sure we have some buckets */
        ipdn_bound_var((int *)&nfs->buckets, dn_cfg.hash_size,
            1, dn_cfg.max_hash_size, "flowset buckets");
    } else
        nfs->buckets = 1;	/* we only need 1 */

    if (!locked)
        DN_BH_WLOCK();
    do { /* exit with break when done */
        struct dn_schk *s = NULL;
        int flags = nfs->sched_nr ? DNHT_INSERT : 0;
        int j;
        int oldc = dn_cfg.fsk_count;
        fs = dn_ht_find(dn_cfg.fshash, i, flags, NULL);
        if (fs == NULL) {
            D("missing sched for flowset %d", i);
            break;
        }
        /* grab some defaults from the existing one */
        if (nfs->sched_nr == 0) /* reuse */
            nfs->sched_nr = fs->fs.sched_nr;
        for (j = 0; j < sizeof(nfs->par)/sizeof(nfs->par[0]); j++) {
            if (nfs->par[j] == -1) /* reuse */
                nfs->par[j] = fs->fs.par[j];
        }
        if (bcmp(&fs->fs, nfs, sizeof(*nfs)) == 0) {
            ND("flowset %d unchanged", i);
            /*
             * reconfigure AQM as the parameters can be changed;
             * we consider the flowset busy if it has scheduler
             * instance(s)
             */
            s = locate_scheduler(nfs->sched_nr);
            config_aqm(fs, (struct dn_extra_parms *) arg,
                s != NULL && s->siht != NULL);
            break; /* no change, nothing to do */
        }
        if (oldc != dn_cfg.fsk_count)	/* new item */
            dn_cfg.id++;
        s = locate_scheduler(nfs->sched_nr);
        /*
         * detach from old scheduler if needed, preserving
         * queues if we need to reattach. Then update the
         * configuration, and possibly attach to the new sched.
         */
        DX(2, "fs %d changed sched %d@%p to %d@%p",
            fs->fs.fs_nr,
            fs->fs.sched_nr, fs->sched, nfs->sched_nr, s);
        if (fs->sched) {
            int flags = s ? DN_DETACH : (DN_DETACH | DN_DESTROY);
            flags |= DN_DESTROY;	/* XXX temporary */
            fsk_detach(fs, flags);
        }
        fs->fs = *nfs; /* copy configuration */
        config_aqm(fs, (struct dn_extra_parms *) arg,
            s != NULL && s->siht != NULL);
        if (s != NULL)
            fsk_attach(fs, s);
    } while (0);
    if (!locked)
        DN_BH_WUNLOCK();
    return fs;
}
/*
 * Config/reconfig a scheduler and its FIFO variant.
 * For !MULTIQUEUE schedulers, also set up the flowset.
 *
 * On reconfigurations (detected because s->fp is set),
 * detach existing flowsets preserving traffic, preserve link,
 * and delete the old scheduler creating a new one.
 */
static int
config_sched(struct dn_sch *_nsch, struct dn_id *arg)
{
    struct dn_schk *s;
    struct schk_new_arg a; /* argument for schk_new */
    int i;
    struct dn_link p;	/* copy of old link */
    struct dn_profile *pf = NULL;	/* copy of old link profile */
    /* Used to preserve mask parameter */
    struct ipfw_flow_id new_mask;
    int new_buckets = 0;
    int new_flags = 0;
    int pipe_cmd;
    int err = ENOMEM;

    a.sch = _nsch;
    if (a.sch->oid.len != sizeof(*a.sch)) {
        D("bad sched len %d", a.sch->oid.len);
        return EINVAL;
    }
    i = a.sch->sched_nr;
    if (i <= 0 || i >= DN_MAX_ID)
        return EINVAL;
    /* make sure we have some buckets */
    if (a.sch->flags & DN_HAVE_MASK)
        ipdn_bound_var((int *)&a.sch->buckets, dn_cfg.hash_size,
            1, dn_cfg.max_hash_size, "sched buckets");
    /* XXX other sanity checks */
    bzero(&p, sizeof(p));

    pipe_cmd = a.sch->flags & DN_PIPE_CMD;
    a.sch->flags &= ~DN_PIPE_CMD; //XXX do it even if it is not set?

    /* Copy mask parameter */
    new_mask = a.sch->sched_mask;
    new_buckets = a.sch->buckets;
    new_flags = a.sch->flags;

    DN_BH_WLOCK();
again: /* run twice, for wfq and fifo */
    /*
     * lookup the type. If not supplied, use the previous one
     * or default to WF2Q+. Otherwise, return an error.
     */
    a.fp = find_sched_type(a.sch->oid.subtype, a.sch->name);
    if (a.fp != NULL) {
        /* found. Lookup or create entry */
        s = dn_ht_find(dn_cfg.schedhash, i, DNHT_INSERT, &a);
    } else if (a.sch->oid.subtype == 0 && !a.sch->name[0]) {
        /* No type. search existing s* or retry with WF2Q+ */
        s = dn_ht_find(dn_cfg.schedhash, i, 0, &a);
        if (s != NULL) {
            a.fp = s->fp;
            /* Scheduler exists, skip to FIFO scheduler
             * if command was pipe config...
             */
            if (pipe_cmd)
                goto next;
        } else {
            /* New scheduler, create a wf2q+ with no mask
             * if command was pipe config...
             */
            if (pipe_cmd) {
                /* clear mask parameter */
                bzero(&a.sch->sched_mask, sizeof(new_mask));
                a.sch->buckets = 0;
                a.sch->flags &= ~DN_HAVE_MASK;
            }
            a.sch->oid.subtype = DN_SCHED_WF2QP;
            goto again;
        }
    } else {
        D("invalid scheduler type %d %s",
            a.sch->oid.subtype, a.sch->name);
        err = EINVAL;
        goto error;
    }
    /* normalize name and subtype */
    a.sch->oid.subtype = a.fp->type;
    bzero(a.sch->name, sizeof(a.sch->name));
    strlcpy(a.sch->name, a.fp->name, sizeof(a.sch->name));
    if (s == NULL) {
        D("cannot allocate scheduler %d", i);
        goto error;
    }
    /* restore existing link if any */
    if (p.link_nr) {
        s->link = p;
        if (!pf || pf->link_nr != p.link_nr) { /* no saved value */
            s->profile = NULL; /* XXX maybe not needed */
        } else {
            s->profile = malloc(sizeof(struct dn_profile),
                M_DUMMYNET, M_NOWAIT | M_ZERO);
            if (s->profile == NULL) {
                D("cannot allocate profile");
                goto error; //XXX
            }
            bcopy(pf, s->profile, sizeof(*pf));
        }
    }
    p.link_nr = 0;
    if (s->fp == NULL) {
        DX(2, "sched %d new type %s", i, a.fp->name);
    } else if (s->fp != a.fp ||
            bcmp(a.sch, &s->sch, sizeof(*a.sch)) ) {
        /* already existing. */
        DX(2, "sched %d type changed from %s to %s",
            i, s->fp->name, a.fp->name);
        DX(4, "   type/sub %d/%d -> %d/%d",
            s->sch.oid.type, s->sch.oid.subtype,
            a.sch->oid.type, a.sch->oid.subtype);
        if (s->link.link_nr == 0)
            D("XXX WARNING link 0 for sched %d", i);
        p = s->link;	/* preserve link */
        if (s->profile) {/* preserve profile */
            if (!pf)
                pf = malloc(sizeof(*pf),
                    M_DUMMYNET, M_NOWAIT | M_ZERO);
            if (pf)	/* XXX should issue a warning otherwise */
                bcopy(s->profile, pf, sizeof(*pf));
        }
        /* remove from the hash */
        dn_ht_find(dn_cfg.schedhash, i, DNHT_REMOVE, NULL);
        /* Detach flowsets, preserve queues. */
        // schk_delete_cb(s, NULL);
        // XXX temporarily, kill queues
        schk_delete_cb(s, (void *)DN_DESTROY);
        goto again;
    } else {
        DX(4, "sched %d unchanged type %s", i, a.fp->name);
    }
    /* complete initialization */
    s->sch = *a.sch;
    s->fp = a.fp;
    s->cfg = arg;
    // XXX schk_reset_credit(s);
    /* create the internal flowset if needed,
     * trying to reuse existing ones if available
     */
    if (!(s->fp->flags & DN_MULTIQUEUE) && !s->fs) {
        s->fs = dn_ht_find(dn_cfg.fshash, i, 0, NULL);
        if (!s->fs) {
            struct dn_fs fs;
            bzero(&fs, sizeof(fs));
            set_oid(&fs.oid, DN_FS, sizeof(fs));
            fs.fs_nr = i + DN_MAX_ID;
            fs.sched_nr = i;
            s->fs = config_fs(&fs, NULL, 1 /* locked */);
        }
        if (!s->fs) {
            schk_delete_cb(s, (void *)DN_DESTROY);
            D("error creating internal fs for %d", i);
            err = EINVAL;
            goto error;
        }
    }
    /* call init function after the flowset is created */
    if (s->fp->config)
        s->fp->config(s);
    update_fs(s);
next:
    if (i < DN_MAX_ID) { /* now configure the FIFO instance */
        i += DN_MAX_ID;
        if (pipe_cmd) {
            /* Restore mask parameter for FIFO */
            a.sch->sched_mask = new_mask;
            a.sch->buckets = new_buckets;
            a.sch->flags = new_flags;
        } else {
            /* sched config shouldn't modify the FIFO scheduler */
            if (dn_ht_find(dn_cfg.schedhash, i, 0, &a) != NULL) {
                /* FIFO already exists, don't touch it */
                err = 0; /* and this is not an error */
                goto error;
            }
        }
        a.sch->sched_nr = i;
        a.sch->oid.subtype = DN_SCHED_FIFO;
        bzero(a.sch->name, sizeof(a.sch->name));
        goto again;
    }
    err = 0;
error:
    DN_BH_WUNLOCK();
    if (pf)
        free(pf, M_DUMMYNET);
    return err;
}
/*
 * Attach a profile to a link.
 */
static int
config_profile(struct dn_profile *pf, struct dn_id *arg)
{
    struct dn_schk *s;
    int i, olen, err = 0;

    if (pf->oid.len < sizeof(*pf)) {
        D("short profile len %d", pf->oid.len);
        return EINVAL;
    }
    i = pf->link_nr;
    if (i <= 0 || i >= DN_MAX_ID)
        return EINVAL;
    /* XXX other sanity checks */
    DN_BH_WLOCK();
    for (; i < 2*DN_MAX_ID; i += DN_MAX_ID) {
        s = locate_scheduler(i);
        if (s == NULL) {
            err = EINVAL;
            break;
        }
        dn_cfg.id++;
        /*
         * If we had a profile and the new one does not fit,
         * or it is deleted, then we need to free memory.
         */
        if (s->profile && (pf->samples_no == 0 ||
                s->profile->oid.len < pf->oid.len)) {
            free(s->profile, M_DUMMYNET);
            s->profile = NULL;
        }
        if (pf->samples_no == 0)
            continue;
        /*
         * new profile, possibly allocate memory
         * and copy data.
         */
        if (s->profile == NULL)
            s->profile = malloc(pf->oid.len,
                M_DUMMYNET, M_NOWAIT | M_ZERO);
        if (s->profile == NULL) {
            D("no memory for profile %d", i);
            err = ENOMEM;
            break;
        }
        /* preserve larger length XXX double check */
        olen = s->profile->oid.len;
        if (olen < pf->oid.len)
            olen = pf->oid.len;
        bcopy(pf, s->profile, pf->oid.len);
        s->profile->oid.len = olen;
    }
    DN_BH_WUNLOCK();
    return err;
}
/*
 * Delete all objects.
 */
static void
dummynet_flush(void)
{
    /* delete all schedulers and related links/queues/flowsets */
    dn_ht_scan(dn_cfg.schedhash, schk_delete_cb,
        (void *)(uintptr_t)DN_DELETE_FS);
    /* delete all remaining (unlinked) flowsets */
    DX(4, "still %d unlinked fs", dn_cfg.fsk_count);
    dn_ht_free(dn_cfg.fshash, DNHT_REMOVE);
    fsk_detach_list(&dn_cfg.fsu, DN_DELETE_FS);
    /* Reinitialize system heap... */
    heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
}
/*
 * Main handler for configuration. We are guaranteed to be called
 * with an oid which is at least a dn_id.
 * - the first object is the command (config, delete, flush, ...)
 * - config_link must be issued after the corresponding config_sched
 * - parameters (DN_TEXT) for an object must precede the object
 *   processed on a config_sched.
 */
static int
do_config(void *p, int l)
{
    struct dn_id *next, *o;
    int err = 0, err2 = 0;
    struct dn_id *arg = NULL;
    uintptr_t *a;

    o = p;
    if (o->id != DN_API_VERSION) {
        D("invalid api version got %d need %d",
            o->id, DN_API_VERSION);
        return EINVAL;
    }
    for (; l >= sizeof(*o); o = next) {
        struct dn_id *prev = arg;
        if (o->len < sizeof(*o) || l < o->len) {
            D("bad len o->len %d len %d", o->len, l);
            err = EINVAL;
            break;
        }
        l -= o->len;
        next = (struct dn_id *)((char *)o + o->len);
        err = 0;
        switch (o->type) {
        default:
            D("cmd %d not implemented", o->type);
            break;

#ifdef EMULATE_SYSCTL
        /* sysctl emulation.
         * if we recognize the command, jump to the correct
         * handler and return
         */
        case DN_SYSCTL_SET:
            err = kesysctl_emu_set(p, l);
            return err;
#endif

        case DN_CMD_CONFIG: /* simply a header */
            break;

        case DN_CMD_DELETE:
            /* the argument is in the first uintptr_t after o */
            a = (uintptr_t *)(o+1);
            if (o->len < sizeof(*o) + sizeof(*a)) {
                err = EINVAL;
                break;
            }
            switch (o->subtype) {
            case DN_LINK:
                /* delete base and derived schedulers */
                DN_BH_WLOCK();
                err = delete_schk(*a);
                err2 = delete_schk(*a + DN_MAX_ID);
                DN_BH_WUNLOCK();
                if (!err)
                    err = err2;
                break;

            default:
                D("invalid delete type %d",
                    o->subtype);
                err = EINVAL;
                break;

            case DN_FS:
                err = (*a < 1 || *a >= DN_MAX_ID) ?
                    EINVAL : delete_fs(*a, 0) ;
                break;
            }
            break;

        case DN_CMD_FLUSH:
            DN_BH_WLOCK();
            dummynet_flush();
            DN_BH_WUNLOCK();
            break;
        case DN_TEXT:	/* store argument for the next block */
            arg = o;
            break;
        case DN_LINK:
            err = config_link((struct dn_link *)o, arg);
            break;
        case DN_PROFILE:
            err = config_profile((struct dn_profile *)o, arg);
            break;
        case DN_SCH:
            err = config_sched((struct dn_sch *)o, arg);
            break;
        case DN_FS:
            err = (NULL == config_fs((struct dn_fs *)o, arg, 0));
            break;
        }
        if (prev)
            arg = NULL;
        if (err != 0)
            break;
    }
    return err;
}
static int
compute_space(struct dn_id *cmd, struct copy_args *a)
{
    int x = 0, need = 0;
    int profile_size = sizeof(struct dn_profile) -
        ED_MAX_SAMPLES_NO*sizeof(int);

    /* NOTE about compute space:
     * NP	= dn_cfg.schk_count
     * NSI	= dn_cfg.si_count
     * NF	= dn_cfg.fsk_count
     * NQ	= dn_cfg.queue_count
     * - ipfw pipe show
     *   (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half the
     *	schedulers: link, scheduler template, flowset
     *	integrated in scheduler and header
     *   (NSI)*(dn_flow) all scheduler instances (includes
     *	the queue instance)
     * - ipfw sched show
     *   (NP/2)*(dn_link + dn_sch + dn_id + dn_fs) only half the
     *	schedulers: link, scheduler template, flowset
     *	integrated in scheduler and header
     *   (NSI * dn_flow) all scheduler instances
     *   (NF * sizeof(uint32_t)) space for the flowset list linked to
     *	each scheduler
     *   (NQ * dn_queue) all queues [XXX for now not listed]
     * - ipfw queue show
     *   (NF * dn_fs) all flowsets
     *   (NQ * dn_queue) all queues
     */
    switch (cmd->subtype) {
    default:
        return -1;
    /* XXX where do LINK and SCH differ ? */
    /* 'ipfw sched show' could list all queues associated to
     * a scheduler. This feature for now is disabled
     */
    case DN_LINK:	/* pipe show */
        x = DN_C_LINK | DN_C_SCH | DN_C_FLOW;
        need += dn_cfg.schk_count *
            (sizeof(struct dn_fs) + profile_size) / 2;
        need += dn_cfg.fsk_count * sizeof(uint32_t);
        break;
    case DN_SCH:	/* sched show */
        need += dn_cfg.schk_count *
            (sizeof(struct dn_fs) + profile_size) / 2;
        need += dn_cfg.fsk_count * sizeof(uint32_t);
        x = DN_C_SCH | DN_C_LINK | DN_C_FLOW;
        break;
    case DN_FS:	/* queue show */
        x = DN_C_FS | DN_C_QUEUE;
        break;
    case DN_GET_COMPAT:	/* compatibility mode */
        need = dn_compat_calc_size();
        break;
    }
    a->flags = x;
    if (x & DN_C_SCH) {
        need += dn_cfg.schk_count * sizeof(struct dn_sch) / 2;
        /* Note also, each fs might be attached to a sched */
        need += dn_cfg.schk_count * sizeof(struct dn_id) / 2;
    }
    if (x & DN_C_FS)
        need += dn_cfg.fsk_count * sizeof(struct dn_fs);
    if (x & DN_C_LINK) {
        need += dn_cfg.schk_count * sizeof(struct dn_link) / 2;
    }
    /*
     * When exporting a queue to userland, only pass up the
     * struct dn_flow, which is the only visible part.
     */
    if (x & DN_C_QUEUE)
        need += dn_cfg.queue_count * sizeof(struct dn_flow);
    if (x & DN_C_FLOW)
        need += dn_cfg.si_count * (sizeof(struct dn_flow));
    return need;
}
/*
 * If compat != NULL dummynet_get is called in compatibility mode.
 * *compat will be the pointer to the buffer to pass to ipfw.
 */
int
dummynet_get(struct sockopt *sopt, void **compat)
{
    int have, i, need, error;
    char *start = NULL, *buf;
    size_t sopt_valsize;
    struct dn_id *cmd;
    struct copy_args a;
    struct copy_range r;
    int l = sizeof(struct dn_id);

    bzero(&a, sizeof(a));
    bzero(&r, sizeof(r));

    /* save and restore original sopt_valsize around copyin */
    sopt_valsize = sopt->sopt_valsize;

    cmd = &r.o;

    if (!compat) {
        /* copy at least an oid, and possibly a full object */
        error = sooptcopyin(sopt, cmd, sizeof(r), sizeof(*cmd));
        sopt->sopt_valsize = sopt_valsize;
        if (error)
            goto done;
        l = cmd->len;
#ifdef EMULATE_SYSCTL
        /* sysctl emulation. */
        if (cmd->type == DN_SYSCTL_GET)
            return kesysctl_emu_get(sopt);
#endif
        if (l > sizeof(r)) {
            /* request larger than default, allocate buffer */
            cmd = malloc(l, M_DUMMYNET, M_WAITOK);
            error = sooptcopyin(sopt, cmd, l, l);
            sopt->sopt_valsize = sopt_valsize;
            if (error)
                goto done;
        }
    } else { /* compatibility */
        error = 0;
        cmd->type = DN_CMD_GET;
        cmd->len = sizeof(struct dn_id);
        cmd->subtype = DN_GET_COMPAT;
        // cmd->id = sopt_valsize;
        D("compatibility mode");
    }

    /* get AQM params */
    if (cmd->subtype == DN_AQM_PARAMS) {
        error = get_aqm_parms(sopt);
        goto done;
    /* get Scheduler params */
    } else if (cmd->subtype == DN_SCH_PARAMS) {
        error = get_sched_parms(sopt);
        goto done;
    }

    a.extra = (struct copy_range *)cmd;
    if (cmd->len == sizeof(*cmd)) { /* no range, create a default */
        uint32_t *rp = (uint32_t *)(cmd + 1);
        cmd->len += 2* sizeof(uint32_t);
        rp[0] = 1;
        rp[1] = DN_MAX_ID - 1;
        if (cmd->subtype == DN_LINK) {
            rp[0] += DN_MAX_ID;
            rp[1] += DN_MAX_ID;
        }
    }
    /* Count space (under lock) and allocate (outside lock).
     * Exit with lock held if we manage to get enough buffer.
     * Try a few times then give up.
     */
    for (have = 0, i = 0; i < 10; i++) {
        DN_BH_WLOCK();
        need = compute_space(cmd, &a);

        /* if there is a range, ignore value from compute_space() */
        if (l > sizeof(*cmd))
            need = sopt_valsize - sizeof(*cmd);

        if (need < 0) {
            DN_BH_WUNLOCK();
            error = EINVAL;
            goto done;
        }
        need += sizeof(*cmd);
        cmd->id = need;
        if (have >= need)
            break;
        DN_BH_WUNLOCK();
        if (start)
            free(start, M_DUMMYNET);
        start = NULL;
        if (need > sopt_valsize)
            break;

        have = need;
        start = malloc(have, M_DUMMYNET, M_WAITOK | M_ZERO);
    }

    if (start == NULL) {
        if (compat) {
            *compat = NULL;
            error = 1; // XXX
        } else {
            error = sooptcopyout(sopt, cmd, sizeof(*cmd));
        }
        goto done;
    }
    ND("have %d:%d sched %d, %d:%d links %d, %d:%d flowsets %d, "
        "%d:%d si %d, %d:%d queues %d",
        dn_cfg.schk_count, sizeof(struct dn_sch), DN_SCH,
        dn_cfg.schk_count, sizeof(struct dn_link), DN_LINK,
        dn_cfg.fsk_count, sizeof(struct dn_fs), DN_FS,
        dn_cfg.si_count, sizeof(struct dn_flow), DN_SCH_I,
        dn_cfg.queue_count, sizeof(struct dn_queue), DN_QUEUE);
    sopt->sopt_valsize = sopt_valsize;
    a.type = cmd->subtype;

    if (compat == NULL) {
        bcopy(cmd, start, sizeof(*cmd));
        ((struct dn_id *)(start))->len = sizeof(struct dn_id);
        buf = start + sizeof(*cmd);
    } else
        buf = start;
    a.start = &buf;
    a.end = start + have;
    /* start copying other objects */
    if (compat) {
        a.type = DN_COMPAT_PIPE;
        dn_ht_scan(dn_cfg.schedhash, copy_data_helper_compat, &a);
        a.type = DN_COMPAT_QUEUE;
        dn_ht_scan(dn_cfg.fshash, copy_data_helper_compat, &a);
    } else if (a.type == DN_FS) {
        dn_ht_scan(dn_cfg.fshash, copy_data_helper, &a);
    } else {
        dn_ht_scan(dn_cfg.schedhash, copy_data_helper, &a);
    }
    DN_BH_WUNLOCK();

    if (compat) {
        *compat = start;
        sopt->sopt_valsize = buf - start;
        /* free() is done by ip_dummynet_compat() */
        start = NULL; //XXX hack
    } else {
        error = sooptcopyout(sopt, start, buf - start);
    }
done:
    if (cmd && cmd != &r.o)
        free(cmd, M_DUMMYNET);
    if (start)
        free(start, M_DUMMYNET);
    return error;
}
/* Callback called on a scheduler instance to delete it if idle */
static int
drain_scheduler_cb(void *_si, void *arg)
{
    struct dn_sch_inst *si = _si;

    if ((si->kflags & DN_ACTIVE) || si->dline.mq.head != NULL)
        return 0;

    if (si->sched->fp->flags & DN_MULTIQUEUE) {
        if (si->q_count == 0)
            return si_destroy(si, NULL);
        else
            return 0;
    } else { /* !DN_MULTIQUEUE */
        if ((si+1)->ni.length == 0)
            return si_destroy(si, NULL);
        else
            return 0;
    }
    return 0; /* unreachable */
}

/* Callback called on a scheduler to check if it has instances */
static int
drain_scheduler_sch_cb(void *_s, void *arg)
{
    struct dn_schk *s = _s;

    if (s->sch.flags & DN_HAVE_MASK) {
        dn_ht_scan_bucket(s->siht, &s->drain_bucket,
            drain_scheduler_cb, NULL);
    } else if (s->siht) {
        if (drain_scheduler_cb(s->siht, NULL) == DNHT_SCAN_DEL)
            s->siht = NULL;
    }
    return 0;
}

/* Called every tick, try to delete a 'bucket' of schedulers */
void
dn_drain_scheduler(void)
{
    dn_ht_scan_bucket(dn_cfg.schedhash, &dn_cfg.drain_sch,
        drain_scheduler_sch_cb, NULL);
}

/* Callback called on a queue to delete it if it is idle */
static int
drain_queue_cb(void *_q, void *arg)
{
    struct dn_queue *q = _q;

    if (q->ni.length == 0) {
        dn_delete_queue(q, DN_DESTROY);
        return DNHT_SCAN_DEL; /* queue is deleted */
    }
    return 0; /* queue isn't deleted */
}

/* Callback called on a flowset used to check if it has queues */
static int
drain_queue_fs_cb(void *_fs, void *arg)
{
    struct dn_fsk *fs = _fs;

    if (fs->fs.flags & DN_QHT_HASH) {
        /* Flowset has a hash table for queues */
        dn_ht_scan_bucket(fs->qht, &fs->drain_bucket,
            drain_queue_cb, NULL);
    } else if (fs->qht) {
        /*
         * No hash table for this flowset; null the pointer
         * if the queue is deleted.
         */
        if (drain_queue_cb(fs->qht, NULL) == DNHT_SCAN_DEL)
            fs->qht = NULL;
    }
    return 0;
}

/* Called every tick, try to delete a 'bucket' of queues */
void
dn_drain_queue(void)
{
    /* scan a bucket of flowsets */
    dn_ht_scan_bucket(dn_cfg.fshash, &dn_cfg.drain_fs,
        drain_queue_fs_cb, NULL);
}
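
/*
 * The two drain entry points amortize garbage collection: each tick
 * visits a single hash bucket (dn_cfg.drain_sch / dn_cfg.drain_fs
 * remember where the previous scan stopped), so idle queues and
 * scheduler instances are reclaimed gradually rather than in one
 * expensive sweep.
 */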
/*
 * Handler for the various dummynet socket options.
 */
static int
ip_dn_ctl(struct sockopt *sopt)
{
    void *p = NULL;
    int error, l;

    error = priv_check(sopt->sopt_td, PRIV_NETINET_DUMMYNET);
    if (error)
        return (error);

    /* Disallow sets in really-really secure mode. */
    if (sopt->sopt_dir == SOPT_SET) {
        error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
        if (error)
            return (error);
    }

    switch (sopt->sopt_name) {
    default:
        D("dummynet: unknown option %d", sopt->sopt_name);
        error = EINVAL;
        break;

    case IP_DUMMYNET_FLUSH:
    case IP_DUMMYNET_CONFIGURE:
    case IP_DUMMYNET_DEL:	/* remove a pipe or queue */
    case IP_DUMMYNET_GET:
        D("dummynet: compat option %d", sopt->sopt_name);
        error = ip_dummynet_compat(sopt);
        break;

    case IP_DUMMYNET3:
        if (sopt->sopt_dir == SOPT_GET) {
            error = dummynet_get(sopt, NULL);
            break;
        }
        l = sopt->sopt_valsize;
        if (l < sizeof(struct dn_id) || l > 12000) {
            D("argument len %d invalid", l);
            error = EINVAL;
            break;
        }
        p = malloc(l, M_TEMP, M_WAITOK); // XXX can it fail ?
        error = sooptcopyin(sopt, p, l, l);
        if (error)
            break;
        error = do_config(p, l);
        break;
    }

    if (p != NULL)
        free(p, M_TEMP);

    return error;
}
static void
ip_dn_init(void)
{
    if (dn_cfg.init_done)
        return;
    printf("DUMMYNET %p with IPv6 initialized (100409)\n", curvnet);
    dn_cfg.init_done = 1;
    /* Set defaults here. MSVC does not accept initializers,
     * and this is also useful for vimages
     */
    dn_cfg.slot_limit = 100; /* Foot shooting limit for queues. */
    dn_cfg.byte_limit = 1024 * 1024;

    /* RED parameters */
    dn_cfg.red_lookup_depth = 256;	/* default lookup table depth */
    dn_cfg.red_avg_pkt_size = 512;	/* default medium packet size */
    dn_cfg.red_max_pkt_size = 1500;	/* default max packet size */

    /* hash tables */
    dn_cfg.max_hash_size = 65536;	/* max in the hash tables */
    dn_cfg.hash_size = 64;		/* default hash size */

    /* create hash tables for schedulers and flowsets.
     * In both we search by key and by pointer.
     */
    dn_cfg.schedhash = dn_ht_init(NULL, dn_cfg.hash_size,
        offsetof(struct dn_schk, schk_next),
        schk_hash, schk_match, schk_new);
    dn_cfg.fshash = dn_ht_init(NULL, dn_cfg.hash_size,
        offsetof(struct dn_fsk, fsk_next),
        fsk_hash, fsk_match, fsk_new);

    /* bucket index to drain object */
    dn_cfg.drain_fs = 0;
    dn_cfg.drain_sch = 0;

    heap_init(&dn_cfg.evheap, 16, offsetof(struct dn_id, id));
    SLIST_INIT(&dn_cfg.fsu);
    SLIST_INIT(&dn_cfg.schedlist);

    DN_LOCK_INIT();

    TASK_INIT(&dn_task, 0, dummynet_task, curvnet);
    dn_tq = taskqueue_create_fast("dummynet", M_WAITOK,
        taskqueue_thread_enqueue, &dn_tq);
    taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");

    callout_init(&dn_timeout, CALLOUT_MPSAFE);
    dn_reschedule();

    /* Initialize curr_time adjustment mechanics. */
    getmicrouptime(&dn_cfg.prev_t);
}
static void
ip_dn_destroy(int last)
{
    /* ensure no more callouts are started */
    DN_BH_WLOCK();

    /* check for last */
    if (last) {
        ND("removing last instance\n");
        ip_dn_ctl_ptr = NULL;
        ip_dn_io_ptr = NULL;
    }

    dummynet_flush();
    DN_BH_WUNLOCK();

    callout_drain(&dn_timeout);
    taskqueue_drain(dn_tq, &dn_task);
    taskqueue_free(dn_tq);

    dn_ht_free(dn_cfg.schedhash, 0);
    dn_ht_free(dn_cfg.fshash, 0);
    heap_free(&dn_cfg.evheap);

    DN_LOCK_DESTROY();
}

static int
dummynet_modevent(module_t mod, int type, void *data)
{
    if (type == MOD_LOAD) {
        if (ip_dn_io_ptr) {
            printf("DUMMYNET already loaded\n");
            return EEXIST;
        }
        ip_dn_init();
        ip_dn_ctl_ptr = ip_dn_ctl;
        ip_dn_io_ptr = dummynet_io;
        return 0;
    } else if (type == MOD_UNLOAD) {
        ip_dn_destroy(1 /* last */);
        return 0;
    } else
        return EOPNOTSUPP;
}
/* modevent helpers for the modules */
static int
load_dn_sched(struct dn_alg *d)
{
    struct dn_alg *s;

    if (d == NULL)
        return 1; /* error */
    ip_dn_init();	/* just in case, we need the lock */

    /* Check that mandatory funcs exist */
    if (d->enqueue == NULL || d->dequeue == NULL) {
        D("missing enqueue or dequeue for %s", d->name);
        return 1;
    }

    /* Search if the scheduler already exists */
    DN_BH_WLOCK();
    SLIST_FOREACH(s, &dn_cfg.schedlist, next) {
        if (strcmp(s->name, d->name) == 0) {
            D("%s already loaded", d->name);
            break; /* scheduler already exists */
        }
    }
    if (s == NULL)
        SLIST_INSERT_HEAD(&dn_cfg.schedlist, d, next);
    DN_BH_WUNLOCK();
    D("dn_sched %s %sloaded", d->name, s ? "not ":"");
    return 0;
}

static int
unload_dn_sched(struct dn_alg *s)
{
    struct dn_alg *tmp, *r;
    int err = EINVAL;

    ND("called for %s", s->name);

    DN_BH_WLOCK();
    SLIST_FOREACH_SAFE(r, &dn_cfg.schedlist, next, tmp) {
        if (strcmp(s->name, r->name) != 0)
            continue;
        ND("ref_count = %d", r->ref_count);
        err = (r->ref_count != 0) ? EBUSY : 0;
        if (err == 0)
            SLIST_REMOVE(&dn_cfg.schedlist, r, dn_alg, next);
        break;
    }
    DN_BH_WUNLOCK();
    D("dn_sched %s %sunloaded", s->name, err ? "not ":"");
    return err;
}

int
dn_sched_modevent(module_t mod, int cmd, void *arg)
{
    struct dn_alg *sch = arg;

    if (cmd == MOD_LOAD)
        return load_dn_sched(sch);
    else if (cmd == MOD_UNLOAD)
        return unload_dn_sched(sch);
    else
        return 0;
}

static moduledata_t dummynet_mod = {
    "dummynet", dummynet_modevent, NULL
};

#define	DN_SI_SUB	SI_SUB_PROTO_IFATTACHDOMAIN
#define	DN_MODEV_ORD	(SI_ORDER_ANY - 128) /* after ipfw */
DECLARE_MODULE(dummynet, dummynet_mod, DN_SI_SUB, DN_MODEV_ORD);
MODULE_DEPEND(dummynet, ipfw, 2, 2, 2);
MODULE_VERSION(dummynet, 3);
/*
 * Starting up. Done in order after dummynet_modevent() has been called.
 * VNET_SYSINIT is also called for each existing vnet and each new vnet.
 */
//VNET_SYSINIT(vnet_dn_init, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_init, NULL);

/*
 * Shutdown handlers close up shop. These are done in REVERSE ORDER, but
 * still after dummynet_modevent() has been called. Not called on reboot.
 * VNET_SYSUNINIT is also called for each exiting vnet as it exits,
 * or when the module is unloaded.
 */
//VNET_SYSUNINIT(vnet_dn_uninit, DN_SI_SUB, DN_MODEV_ORD+2, ip_dn_destroy, NULL);
/* modevent helpers for the AQM modules */
static int
load_dn_aqm(struct dn_aqm *d)
{
    struct dn_aqm *aqm = NULL;

    if (d == NULL)
        return 1; /* error */
    ip_dn_init();	/* just in case, we need the lock */

    /* Check that mandatory funcs exist */
    if (d->enqueue == NULL || d->dequeue == NULL) {
        D("missing enqueue or dequeue for %s", d->name);
        return 1;
    }

    /* Search if the AQM already exists */
    DN_BH_WLOCK();
    SLIST_FOREACH(aqm, &dn_cfg.aqmlist, next) {
        if (strcmp(aqm->name, d->name) == 0) {
            D("%s already loaded", d->name);
            break; /* AQM already exists */
        }
    }
    if (aqm == NULL)
        SLIST_INSERT_HEAD(&dn_cfg.aqmlist, d, next);
    DN_BH_WUNLOCK();
    D("dn_aqm %s %sloaded", d->name, aqm ? "not ":"");
    return 0;
}

/*
 * Callback to clean up the AQM status for queues connected to a flowset
 * and then deconfigure the flowset.
 * This function is called before an AQM module is unloaded.
 */
static int
fs_cleanup(void *_fs, void *arg)
{
    struct dn_fsk *fs = _fs;
    uint32_t type = *(uint32_t *)arg;

    if (fs->aqmfp && fs->aqmfp->type == type)
        aqm_cleanup_deconfig_fs(fs);

    return 0;
}

static int
unload_dn_aqm(struct dn_aqm *aqm)
{
    struct dn_aqm *tmp, *r;
    int err = EINVAL;

    ND("called for %s", aqm->name);

    DN_BH_WLOCK();

    /* clean up AQM status and deconfigure flowsets */
    dn_ht_scan(dn_cfg.fshash, fs_cleanup, &aqm->type);

    SLIST_FOREACH_SAFE(r, &dn_cfg.aqmlist, next, tmp) {
        if (strcmp(aqm->name, r->name) != 0)
            continue;
        ND("ref_count = %d", r->ref_count);
        err = (r->ref_count != 0 || r->cfg_ref_count != 0) ? EBUSY : 0;
        if (err == 0)
            SLIST_REMOVE(&dn_cfg.aqmlist, r, dn_aqm, next);
        break;
    }
    DN_BH_WUNLOCK();
    D("%s %sunloaded", aqm->name, err ? "not ":"");
    if (err && r != NULL)
        D("ref_count=%d, cfg_ref_count=%d", r->ref_count, r->cfg_ref_count);
    return err;
}

int
dn_aqm_modevent(module_t mod, int cmd, void *arg)
{
    struct dn_aqm *aqm = arg;

    if (cmd == MOD_LOAD)
        return load_dn_aqm(aqm);
    else if (cmd == MOD_UNLOAD)
        return unload_dn_aqm(aqm);
    else
        return 0;
}

/* end of file */