2 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
4 * Supported by: Valeria Paoli
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
32 * Sockopt support for ipfw. The routines here implement
33 * the upper half of the ipfw code.
39 #error IPFIREWALL requires INET.
41 #include "opt_inet6.h"
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h> /* struct m_tag used by nested headers */
47 #include <sys/kernel.h>
51 #include <sys/rwlock.h>
52 #include <sys/socket.h>
53 #include <sys/socketvar.h>
54 #include <sys/sysctl.h>
55 #include <sys/syslog.h>
56 #include <sys/fnv_hash.h>
58 #include <net/route.h>
61 #include <netinet/in.h>
62 #include <netinet/ip_var.h> /* hooks */
63 #include <netinet/ip_fw.h>
65 #include <netpfil/ipfw/ip_fw_private.h>
66 #include <netpfil/ipfw/ip_fw_table.h>
69 #include <security/mac/mac_framework.h>
/*
 * File-local forward declarations and the named-object hash container.
 * NOTE(review): this extract is decimated — intermediate source lines
 * (e.g. the struct's closing brace) are missing; verify against upstream.
 */
72 static int ipfw_ctl(struct sockopt *sopt);
73 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len,
74 struct rule_check_info *ci);
75 static int check_ipfw_rule1(struct ip_fw_rule *rule, int size,
76 struct rule_check_info *ci);
77 static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
78 struct rule_check_info *ci);
80 #define NAMEDOBJ_HASH_SIZE 32
/* Hash instance keeping named objects indexed both by name and by number. */
82 struct namedobj_instance {
83 struct namedobjects_head *names;
84 struct namedobjects_head *values;
85 uint32_t nn_size; /* names hash size */
86 uint32_t nv_size; /* number hash size */
87 u_long *idx_mask; /* used items bitmask */
88 uint32_t max_blocks; /* number of "long" blocks in bitmask */
89 uint32_t count; /* number of items */
90 uint16_t free_off[IPFW_MAX_SETS]; /* first possible free offset */
91 objhash_hash_f *hash_f;
94 #define BLOCK_ITEMS (8 * sizeof(u_long)) /* Number of items for ffsl() */
96 static uint32_t objhash_hash_name(struct namedobj_instance *ni, void *key,
98 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val);
99 static int objhash_cmp_name(struct named_object *no, void *name, uint32_t set);
101 static int ipfw_flush_sopt_data(struct sockopt_data *sd);
/*
 * Per-VNET UMA zone for per-CPU rule counters plus rule alloc/free helpers.
 * NOTE(review): two variants (with/without PCPU counters) appear below; the
 * #ifdef lines selecting between them are missing from this extract.
 */
103 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
106 * static variables followed by global ones
111 static VNET_DEFINE(uma_zone_t, ipfw_cntr_zone);
112 #define V_ipfw_cntr_zone VNET(ipfw_cntr_zone)
/* Create the PCPU counter zone (one ip_fw_cntr per rule). */
118 V_ipfw_cntr_zone = uma_zcreate("IPFW counters",
119 sizeof(ip_fw_cntr), NULL, NULL, NULL, NULL,
120 UMA_ALIGN_PTR, UMA_ZONE_PCPU);
124 ipfw_destroy_counters()
127 uma_zdestroy(V_ipfw_cntr_zone);
/* Allocate a zeroed rule plus its counter block; may sleep (M_WAITOK). */
131 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize)
135 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO);
136 rule->cntr = uma_zalloc(V_ipfw_cntr_zone, M_WAITOK | M_ZERO);
142 free_rule(struct ip_fw *rule)
145 uma_zfree(V_ipfw_cntr_zone, rule->cntr);
/* Non-PCPU variants of the same helpers (counters embedded in the rule). */
155 ipfw_destroy_counters()
160 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize)
164 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO);
170 free_rule(struct ip_fw *rule)
/*
 * Binary search over chain->map for the smallest rule with
 * (rulenum, id) >= (key, id).  Returns the map index.
 * NOTE(review): midpoint computation, r = chain->map[i] and the final
 * return are missing from this extract.
 */
180 * Find the smallest rule >= key, id.
181 * We could use bsearch but it is so simple that we code it directly
184 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
189 for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
192 if (r->rulenum < key)
193 lo = i + 1; /* continue from the next one */
194 else if (r->rulenum > key)
195 hi = i; /* this might be good */
/* rulenum == key: tie-break on rule id */
197 lo = i + 1; /* continue from the next one */
198 else /* r->id >= id */
199 hi = i; /* this might be good */
/*
 * Rebuilds the skipto index cache (rulenum -> map index) into
 * chain->idxmap_back for rule array @map.  Caller holds UH WLOCK.
 * NOTE(review): loop body lines assigning smap[i] are missing here.
 */
205 * Builds skipto cache on rule set @map.
208 update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
213 IPFW_UH_WLOCK_ASSERT(chain);
216 rulenum = map[mi]->rulenum;
217 smap = chain->idxmap_back;
/* Walk all 2^16 possible rule numbers, mapping each to the first rule
 * whose number is >= that value. */
222 for (i = 0; i < 65536; i++) {
224 /* Use the same rule index until i < rulenum */
225 if (i != rulenum || i == 65535)
227 /* Find next rule with num > i */
228 rulenum = map[++mi]->rulenum;
230 rulenum = map[++mi]->rulenum;
/*
 * Publishes the freshly built backup skipto index as the active one and
 * keeps the old active map as the next backup.  Requires both the UH
 * write lock and the runtime write lock (asserted below).
 */
235 * Swaps prepared (backup) index with current one.
238 swap_skipto_cache(struct ip_fw_chain *chain)
242 IPFW_UH_WLOCK_ASSERT(chain);
243 IPFW_WLOCK_ASSERT(chain);
/* map = chain->idxmap saved in a line missing from this extract */
246 chain->idxmap = chain->idxmap_back;
247 chain->idxmap_back = map;
/*
 * Lazily allocates both skipto index arrays and installs them on the
 * chain.  Safe to call at any time; if another thread already installed
 * an idxmap we free our allocations and return.
 */
251 * Allocate and initialize skipto cache.
254 ipfw_init_skipto_cache(struct ip_fw_chain *chain)
256 int *idxmap, *idxmap_back;
/* NOTE(review): allocates 65536 * sizeof(uint32_t *) for arrays of int —
 * over-allocates on LP64; presumably should be sizeof(uint32_t). Confirm
 * against upstream before changing. */
258 idxmap = malloc(65536 * sizeof(uint32_t *), M_IPFW,
260 idxmap_back = malloc(65536 * sizeof(uint32_t *), M_IPFW,
264 * Note we may be called at any time after initialization,
265 * for example, on first skipto rule, so we need to
266 * provide valid chain->idxmap on return
269 IPFW_UH_WLOCK(chain);
270 if (chain->idxmap != NULL) {
271 IPFW_UH_WUNLOCK(chain);
272 free(idxmap, M_IPFW);
273 free(idxmap_back, M_IPFW);
277 /* Set backup pointer first to permit building cache */
278 chain->idxmap_back = idxmap_back;
279 update_skipto_cache(chain, chain->map);
281 /* It is now safe to set chain->idxmap ptr */
282 chain->idxmap = idxmap;
283 swap_skipto_cache(chain);
285 IPFW_UH_WUNLOCK(chain);
289 * Destroys skipto cache.
292 ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
295 if (chain->idxmap != NULL)
296 free(chain->idxmap, M_IPFW);
297 if (chain->idxmap != NULL)
298 free(chain->idxmap_back, M_IPFW);
/*
 * Allocates a rule-pointer array sized for n_rules + @extra and returns
 * with the UH write lock held on success.  With @locked set the caller
 * already holds the lock, so allocation must not sleep (M_NOWAIT).
 * NOTE(review): retry-loop / return lines are missing from this extract.
 */
303 * allocate a new map, returns the chain locked. extra is the number
304 * of entries to add or delete.
306 static struct ip_fw **
307 get_map(struct ip_fw_chain *chain, int extra, int locked)
314 mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);
316 i = chain->n_rules + extra;
317 map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
319 printf("%s: cannot allocate map\n", __FUNCTION__);
323 IPFW_UH_WLOCK(chain);
324 if (i >= chain->n_rules + extra) /* good */
326 /* otherwise we lost the race, free and retry */
328 IPFW_UH_WUNLOCK(chain);
/*
 * Installs @new_map/@new_len as the chain's rule array and returns the
 * previous array so the caller can free it.  Also swaps the skipto
 * cache so it stays consistent with the new map.
 */
334 * swap the maps. It is supposed to be called with IPFW_UH_WLOCK
336 static struct ip_fw **
337 swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
339 struct ip_fw **old_map;
343 chain->n_rules = new_len;
344 old_map = chain->map;
345 chain->map = new_map;
346 swap_skipto_cache(chain);
/*
 * Copy a rule's packet/byte counters and last-match timestamp into the
 * v1 (ip_fw_bcounter) and v0 (ip_fw_bcounter0) userland layouts.
 * Timestamps are stored relative to boot and converted to wall clock
 * by adding boottime.tv_sec.
 */
353 export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
356 cntr->size = sizeof(*cntr);
358 if (krule->cntr != NULL) {
/* cntr[0] = packets, cntr[1] = bytes (per-CPU counter pair) */
359 cntr->pcnt = counter_u64_fetch(krule->cntr);
360 cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
361 cntr->timestamp = krule->timestamp;
363 if (cntr->timestamp > 0)
364 cntr->timestamp += boottime.tv_sec;
/* Same export for the legacy v0 counter layout. */
368 export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
371 if (krule->cntr != NULL) {
372 cntr->pcnt = counter_u64_fetch(krule->cntr);
373 cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
374 cntr->timestamp = krule->timestamp;
376 if (cntr->timestamp > 0)
377 cntr->timestamp += boottime.tv_sec;
/*
 * Converts a v1 (current) userland rule into the kernel ip_fw form:
 * copies the fixed header fields and the opcode stream verbatim, and
 * records where the rule number lives in the userland buffer so
 * commit_rules() can write an auto-assigned number back.
 */
381 * Copies rule @urule from v1 userland format (current).
383 * Assume @krule is zeroed.
386 import_rule1(struct rule_check_info *ci)
388 struct ip_fw_rule *urule;
391 urule = (struct ip_fw_rule *)ci->urule;
392 krule = (struct ip_fw *)ci->krule;
395 krule->act_ofs = urule->act_ofs;
396 krule->cmd_len = urule->cmd_len;
397 krule->rulenum = urule->rulenum;
398 krule->set = urule->set;
399 krule->flags = urule->flags;
401 /* Save rulenum offset */
402 ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);
/* cmd_len is in 32-bit words */
405 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
/*
 * Serializes kernel rule @krule into @data in the v1 sockopt layout:
 * a TLV header, optionally a counter block (when @rcntrs is set), then
 * the rule header and opcode stream.  @data is assumed zeroed.
 */
409 * Export rule into v1 format (Current).
411 * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
413 * [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
415 * Assume @data is zeroed.
418 export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
420 struct ip_fw_bcounter *cntr;
421 struct ip_fw_rule *urule;
424 /* Fill in TLV header */
425 tlv = (ipfw_obj_tlv *)data;
426 tlv->type = IPFW_TLV_RULE_ENT;
/* With counters: bcounter block precedes the rule. */
431 cntr = (struct ip_fw_bcounter *)(tlv + 1);
432 urule = (struct ip_fw_rule *)(cntr + 1);
433 export_cntr1_base(krule, cntr);
/* Without counters: rule follows the TLV directly. */
435 urule = (struct ip_fw_rule *)(tlv + 1);
438 urule->act_ofs = krule->act_ofs;
439 urule->cmd_len = krule->cmd_len;
440 urule->rulenum = krule->rulenum;
441 urule->set = krule->set;
442 urule->flags = krule->flags;
443 urule->id = krule->id;
446 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
/*
 * Converts a legacy FreeBSD8 (v0) userland rule to kernel form, then
 * rewrites opcodes in place: the old 65535 tablearg sentinel becomes
 * IP_FW_TARG, and interface-table names (leading '\1') get their table
 * number narrowed to the kernel's 16-bit index field.
 */
451 * Copies rule @urule from FreeBSD8 userland format (v0)
453 * Assume @krule is zeroed.
456 import_rule0(struct rule_check_info *ci)
458 struct ip_fw_rule0 *urule;
462 ipfw_insn_limit *lcmd;
465 urule = (struct ip_fw_rule0 *)ci->urule;
466 krule = (struct ip_fw *)ci->krule;
469 krule->act_ofs = urule->act_ofs;
470 krule->cmd_len = urule->cmd_len;
471 krule->rulenum = urule->rulenum;
472 krule->set = urule->set;
/* v0 abused the _pad field's low bit for the NOOPT flag */
473 if ((urule->_pad & 1) != 0)
474 krule->flags |= IPFW_RULE_NOOPT;
476 /* Save rulenum offset */
477 ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);
480 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
484 * 1) convert tablearg value from 65335 to 0
485 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room for targ).
486 * 3) convert table number in iface opcodes to u16
/* Walk the opcode stream; l counts remaining 32-bit words. */
492 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
495 switch (cmd->opcode) {
496 /* Opcodes supporting tablearg */
508 if (cmd->arg1 == 65535)
509 cmd->arg1 = IP_FW_TARG;
513 if (cmd->arg1 == 65535)
514 cmd->arg1 = IP_FW_TARG;
519 lcmd = (ipfw_insn_limit *)cmd;
520 if (lcmd->conn_limit == 65535)
521 lcmd->conn_limit = IP_FW_TARG;
523 /* Interface tables */
527 /* Interface table, possibly */
528 cmdif = (ipfw_insn_if *)cmd;
529 if (cmdif->name[0] != '\1')
532 cmdif->p.kidx = (uint16_t)cmdif->p.glob;
/*
 * Inverse of import_rule0(): serializes kernel rule @krule into the
 * legacy v0 layout and converts opcode values back — IP_FW_TARG becomes
 * 65535, the O_SETFIB/O_SETDSCP high bit is stripped, and interface
 * table indices are widened back to the glob field.
 */
539 * Copies rule @krule from kernel to FreeBSD8 userland format (v0)
542 export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
546 ipfw_insn_limit *lcmd;
550 memset(urule, 0, len);
551 urule->act_ofs = krule->act_ofs;
552 urule->cmd_len = krule->cmd_len;
553 urule->rulenum = krule->rulenum;
554 urule->set = krule->set;
555 if ((krule->flags & IPFW_RULE_NOOPT) != 0)
559 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
561 /* Export counters */
562 export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);
566 * 1) convert tablearg value from 0 to 65335
567 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
568 * 3) convert table number in iface opcodes to int
/* Walk the opcode stream; l counts remaining 32-bit words. */
574 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
577 switch (cmd->opcode) {
578 /* Opcodes supporting tablearg */
590 if (cmd->arg1 == IP_FW_TARG)
595 if (cmd->arg1 == IP_FW_TARG)
598 cmd->arg1 &= ~0x8000;
601 lcmd = (ipfw_insn_limit *)cmd;
602 if (lcmd->conn_limit == IP_FW_TARG)
603 lcmd->conn_limit = 65535;
605 /* Interface tables */
609 /* Interface table, possibly */
610 cmdif = (ipfw_insn_if *)cmd;
611 if (cmdif->name[0] != '\1')
614 cmdif->p.glob = cmdif->p.kidx;
/*
 * Inserts @count pre-checked rules into the chain: resolves table
 * references, grows the rule map, finds the insertion point, assigns
 * an auto-incremented rule number when none was given, and writes that
 * number back into the userland buffer via ci->urule_numoff.
 * NOTE(review): error-path and cleanup lines are missing from this
 * extract; only the happy path is fully visible.
 */
621 * Add new rule(s) to the list possibly creating rule number for each.
622 * Update the rule_number in the input struct so the caller knows it as well.
623 * Must be called without IPFW_UH held
626 commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count)
628 int error, i, insert_before, tcount;
629 uint16_t rulenum, *pnum;
630 struct rule_check_info *ci;
632 struct ip_fw **map; /* the new array of pointers */
634 /* Check if we need to do table remap */
636 for (ci = rci, i = 0; i < count; ci++, i++) {
637 if (ci->table_opcodes == 0)
641 * Rule has some table opcodes.
642 * Reference & allocate needed tables/
644 error = ipfw_rewrite_table_uidx(chain, ci);
648 * rewrite failed, state for current rule
649 * has been reverted. Check if we need to
655 * We have some more table rules
656 * we need to rollback.
659 IPFW_UH_WLOCK(chain);
662 if (ci->table_opcodes == 0)
664 ipfw_unref_rule_tables(chain,ci->krule);
667 IPFW_UH_WUNLOCK(chain);
677 /* get_map returns with IPFW_UH_WLOCK if successful */
678 map = get_map(chain, count, 0 /* not locked */);
/* map == NULL: drop table references taken above and bail out */
682 IPFW_UH_WLOCK(chain);
683 for (ci = rci, i = 0; i < count; ci++, i++) {
684 if (ci->table_opcodes == 0)
687 ipfw_unref_rule_tables(chain, ci->krule);
689 IPFW_UH_WUNLOCK(chain);
/* Clamp the auto-numbering step to a sane [1, 1000] range. */
695 if (V_autoinc_step < 1)
697 else if (V_autoinc_step > 1000)
698 V_autoinc_step = 1000;
700 /* FIXME: Handle count > 1 */
703 rulenum = krule->rulenum;
705 /* find the insertion point, we will insert before */
706 insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE;
707 i = ipfw_find_rule(chain, insert_before, 0);
708 /* duplicate first part */
710 bcopy(chain->map, map, i * sizeof(struct ip_fw *));
712 /* duplicate remaining part, we always have the default rule */
713 bcopy(chain->map + i, map + i + 1,
714 sizeof(struct ip_fw *) *(chain->n_rules - i));
716 /* Compute rule number and write it back */
717 rulenum = i > 0 ? map[i-1]->rulenum : 0;
718 if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
719 rulenum += V_autoinc_step;
720 krule->rulenum = rulenum;
721 /* Save number to userland rule */
722 pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff);
/* Publish: bump id, refresh skipto cache, swap in the new map. */
726 krule->id = chain->id + 1;
727 update_skipto_cache(chain, map);
728 map = swap_map(chain, map, chain->n_rules + 1);
729 chain->static_len += RULEUSIZE0(krule);
730 IPFW_UH_WUNLOCK(chain);
/*
 * Deferred rule destruction: ipfw_reap_add() unlinks a rule (under the
 * UH write lock) and threads it onto a singly linked reap list by
 * reusing the rule's first pointer-sized word; ipfw_reap_rules() later
 * walks that list without locks and frees each rule.
 */
737 * Adds @rule to the list of rules to reap
740 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head,
744 IPFW_UH_WLOCK_ASSERT(chain);
746 /* Unlink rule from everywhere */
747 ipfw_unref_rule_tables(chain, rule);
/* Push onto the reap list: rule's first word stores the next pointer */
749 *((struct ip_fw **)rule) = *head;
754 * Reclaim storage associated with a list of rules. This is
755 * typically the list created using remove_rule.
756 * A NULL pointer on input is handled correctly.
759 ipfw_reap_rules(struct ip_fw *head)
763 while ((rule = head) != NULL) {
764 head = *((struct ip_fw **)head);
/*
 * Predicate used by delete/move/clear operations: decides whether @rule
 * matches range descriptor @rt.  The default rule never matches, the
 * reserved set is protected from flush-all requests, and set/number
 * filters apply only when their corresponding flag bits are present.
 */
771 * (default || reserved || !match_set || !match_number)
773 * default ::= (rule->rulenum == IPFW_DEFAULT_RULE)
774 * // the default rule is always protected
776 * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET)
777 * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush")
779 * match_set ::= (cmd == 0 || rule->set == set)
780 * // set number is ignored for cmd == 0
782 * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum)
783 * // number is ignored for cmd == 1 or n == 0
787 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt)
790 /* Don't match default rule regardless of query */
791 if (rule->rulenum == IPFW_DEFAULT_RULE)
794 /* Don't match rules in reserved set for flush requests */
795 if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET)
798 /* If we're filtering by set, don't match other sets */
799 if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set)
802 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
803 (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule))
/*
 * Deletes all rules matching @rt in several stages: compute the map
 * index window to inspect, build a new map without the matching rules,
 * swap it in (flushing dynamic states they own), then reap the removed
 * rules outside the lock.  Deleted-rule count is returned via @ndel.
 * NOTE(review): several bookkeeping lines (ofs/n updates, reap-list
 * init) are missing from this extract.
 */
810 * Delete rules matching range @rt.
811 * Saves number of deleted rules in @ndel.
813 * Returns 0 on success.
816 delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
818 struct ip_fw *reap, *rule, **map;
823 IPFW_UH_WLOCK(chain); /* arbitrate writers */
826 * Stage 1: Determine range to inspect.
827 * Range is half-inclusive, e.g [start, end).
830 end = chain->n_rules - 1;
832 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
833 start = ipfw_find_rule(chain, rt->start_rule, 0);
835 end = ipfw_find_rule(chain, rt->end_rule, 0);
/* extend end past duplicates of end_rule (half-open interval) */
836 if (rt->end_rule != IPFW_DEFAULT_RULE)
837 while (chain->map[end]->rulenum == rt->end_rule)
841 /* Allocate new map of the same size */
842 map = get_map(chain, 0, 1 /* locked */);
844 IPFW_UH_WUNLOCK(chain);
851 /* 1. bcopy the initial part of the map */
853 bcopy(chain->map, map, start * sizeof(struct ip_fw *));
854 /* 2. copy active rules between start and end */
855 for (i = start; i < end; i++) {
856 rule = chain->map[i];
857 if (ipfw_match_range(rule, rt) == 0) {
/* keep non-matching rule; matching dynamic-state owners are noted */
863 if (ipfw_is_dyn_rule(rule) != 0)
866 /* 3. copy the final part of the map */
867 bcopy(chain->map + end, map + ofs,
868 (chain->n_rules - end) * sizeof(struct ip_fw *));
869 /* 4. recalculate skipto cache */
870 update_skipto_cache(chain, map);
871 /* 5. swap the maps (under UH_WLOCK + WHLOCK) */
872 map = swap_map(chain, map, chain->n_rules - n);
873 /* 6. Remove all dynamic states originated by deleted rules */
875 ipfw_expire_dyn_rules(chain, rt);
876 /* 7. now remove the rules deleted from the old map */
877 for (i = start; i < end; i++) {
879 if (ipfw_match_range(rule, rt) == 0)
881 chain->static_len -= RULEUSIZE0(rule);
882 ipfw_reap_add(chain, &reap, rule);
884 IPFW_UH_WUNLOCK(chain);
886 ipfw_reap_rules(reap);
/*
 * Moves every rule matching @rt into set rt->new_set.  When per-set
 * tables are enabled, referenced tables must be movable too, otherwise
 * the operation fails before any rule is touched.
 */
894 * Changes set of given rule rannge @rt
897 * Returns 0 on success.
900 move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
905 IPFW_UH_WLOCK(chain);
908 * Move rules with matching paramenerts to a new set.
909 * This one is much more complex. We have to ensure
910 * that all referenced tables (if any) are referenced
911 * by given rule subset only. Otherwise, we can't move
912 * them to new set and have to return error.
914 if (V_fw_tables_sets != 0) {
915 if (ipfw_move_tables_sets(chain, rt, rt->new_set) != 0) {
916 IPFW_UH_WUNLOCK(chain);
921 /* XXX: We have to do swap holding WLOCK */
/* n_rules - 1: skip the default rule at the end of the map */
922 for (i = 0; i < chain->n_rules - 1; i++) {
923 rule = chain->map[i];
924 if (ipfw_match_range(rule, rt) == 0)
926 rule->set = rt->new_set;
929 IPFW_UH_WUNLOCK(chain);
/*
 * Resets one rule's accounting: always refills the O_LOG budget when
 * the action carries a log opcode; zeroes the packet/byte counters too
 * unless @log_only is set (that branch's guard line is missing here).
 */
935 * Clear counters for a specific rule.
936 * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
937 * so we only care that rules do not disappear.
940 clear_counters(struct ip_fw *rule, int log_only)
942 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
945 IPFW_ZERO_RULE_COUNTER(rule);
946 if (l->o.opcode == O_LOG)
947 l->log_left = l->max_log;
/*
 * clear_range(): applies clear_counters() to every rule matching @rt
 * and returns how many were cleared.
 * check_range_tlv(): validates a userland ipfw_range_tlv (size, rule
 * ordering, set bounds) before it is acted upon.
 */
951 * Flushes rules counters and/or log values on matching range.
953 * Returns number of items cleared.
956 clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
964 IPFW_UH_WLOCK(chain); /* arbitrate writers */
/* n_rules - 1: the trailing default rule is never cleared here */
965 for (i = 0; i < chain->n_rules - 1; i++) {
966 rule = chain->map[i];
967 if (ipfw_match_range(rule, rt) == 0)
969 clear_counters(rule, log_only);
972 IPFW_UH_WUNLOCK(chain);
978 check_range_tlv(ipfw_range_tlv *rt)
981 if (rt->head.length != sizeof(*rt))
983 if (rt->start_rule > rt->end_rule)
985 if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
/*
 * Sockopt handler for rule deletion (IP_FW_XDEL): validates the range
 * TLV, runs delete_range(), and reports the deleted count back to
 * userland by overwriting range.new_set in the reply buffer.
 */
992 * Delete rules matching specified parameters
993 * Data layout (v0)(current):
994 * Request: [ ipfw_obj_header ipfw_range_tlv ]
995 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
997 * Saves number of deleted rules in ipfw_range_tlv->new_set.
999 * Returns 0 on success.
1002 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1003 struct sockopt_data *sd)
1005 ipfw_range_header *rh;
1008 if (sd->valsize != sizeof(*rh))
1011 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize)
1013 if (check_range_tlv(&rh->range) != 0)
1017 if ((error = delete_range(chain, &rh->range, &ndel)) != 0)
1020 /* Save number of rules deleted */
1021 rh->range.new_set = ndel;
/*
 * Sockopt handler for moving rules between sets (IP_FW_XMOVE):
 * validates the range TLV and delegates to move_range().
 */
1026 * Move rules/sets matching specified parameters
1027 * Data layout (v0)(current):
1028 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1030 * Returns 0 on success.
1033 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1034 struct sockopt_data *sd)
1036 ipfw_range_header *rh;
1038 if (sd->valsize != sizeof(*rh))
1041 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1043 if (check_range_tlv(&rh->range) != 0)
1046 return (move_range(chain, &rh->range));
/*
 * Sockopt handler for counter/log reset (IP_FW_XZERO / IP_FW_XRESETLOG):
 * validates the range TLV, clears matching rules via clear_range(),
 * logs what was done, and reports the cleared count via range.new_set.
 */
1050 * Clear rule accounting data matching specified parameters
1051 * Data layout (v0)(current):
1052 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1053 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1055 * Saves number of cleared rules in ipfw_range_tlv->new_set.
1057 * Returns 0 on success.
1060 clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1061 struct sockopt_data *sd)
1063 ipfw_range_header *rh;
1067 if (sd->valsize != sizeof(*rh))
1070 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1072 if (check_range_tlv(&rh->range) != 0)
/* RESETLOG resets only logging budgets; XZERO clears counters too */
1075 log_only = (op3->opcode == IP_FW_XRESETLOG);
1077 num = clear_range(chain, &rh->range, log_only);
1079 if (rh->range.flags & IPFW_RCFLAG_ALL)
1080 msg = log_only ? "All logging counts reset" :
1081 "Accounting cleared";
1083 msg = log_only ? "logging count reset" : "cleared";
1086 int lev = LOG_SECURITY | LOG_NOTICE;
1087 log(lev, "ipfw: %s.\n", msg);
1090 /* Save number of rules cleared */
1091 rh->range.new_set = num;
/*
 * enable_sets(): updates the global disabled-sets bitmask — rt->set
 * bits disable, rt->new_set bits enable, RESVD_SET is forced enabled.
 * swap_sets(): exchanges two rule sets (@mv == 0) or moves one into
 * the other (@mv != 0), including per-set tables when enabled.
 */
1096 enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1100 IPFW_UH_WLOCK_ASSERT(chain);
1102 /* Change enabled/disabled sets mask */
1103 v_set = (V_set_disable | rt->set) & ~rt->new_set;
1104 v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */
1106 V_set_disable = v_set;
1107 IPFW_WUNLOCK(chain);
1111 swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
1116 IPFW_UH_WLOCK_ASSERT(chain);
1118 /* Swap or move two sets */
1119 for (i = 0; i < chain->n_rules - 1; i++) {
1120 rule = chain->map[i];
1121 if (rule->set == rt->set)
1122 rule->set = rt->new_set;
1123 else if (rule->set == rt->new_set && mv == 0)
1124 rule->set = rt->set;
1126 if (V_fw_tables_sets != 0)
1127 ipfw_swap_tables_sets(chain, rt->set, rt->new_set, mv);
/*
 * Sockopt handler dispatching set-management opcodes (swap, move,
 * enable/disable) to swap_sets()/enable_sets() under the UH write lock.
 */
1131 * Swaps or moves set
1132 * Data layout (v0)(current):
1133 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1135 * Returns 0 on success.
1138 manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1139 struct sockopt_data *sd)
1141 ipfw_range_header *rh;
1143 if (sd->valsize != sizeof(*rh))
1146 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1148 if (rh->range.head.length != sizeof(ipfw_range_tlv))
1151 IPFW_UH_WLOCK(chain);
1152 switch (op3->opcode) {
1153 case IP_FW_SET_SWAP:
1154 case IP_FW_SET_MOVE:
1155 swap_sets(chain, &rh->range, op3->opcode == IP_FW_SET_MOVE);
1157 case IP_FW_SET_ENABLE:
1158 enable_sets(chain, &rh->range);
1161 IPFW_UH_WUNLOCK(chain);
/*
 * Legacy (pre-ip_fw3) delete/set-manipulation entry point.  Decodes the
 * packed uint32_t argument into (cmd, new_set, num), translates it into
 * an ipfw_range_tlv, then dispatches: cmds 0/1/5 delete via
 * delete_range(), cmd 2 moves a rule range, cmds 3/4 move/swap whole
 * sets via swap_sets().
 */
1167 * Remove all rules with given number, or do set manipulation.
1168 * Assumes chain != NULL && *chain != NULL.
1170 * The argument is an uint32_t. The low 16 bit are the rule or set number;
1171 * the next 8 bits are the new set; the top 8 bits indicate the command:
1173 * 0 delete rules numbered "rulenum"
1174 * 1 delete rules in set "rulenum"
1175 * 2 move rules "rulenum" to set "new_set"
1176 * 3 move rules from set "rulenum" to set "new_set"
1177 * 4 swap sets "rulenum" and "new_set"
1178 * 5 delete rules "rulenum" and set "new_set"
1181 del_entry(struct ip_fw_chain *chain, uint32_t arg)
1183 uint32_t num; /* rule number or old_set */
1184 uint8_t cmd, new_set;
1190 cmd = (arg >> 24) & 0xff;
1191 new_set = (arg >> 16) & 0xff;
1193 if (cmd > 5 || new_set > RESVD_SET)
/* For rule-number commands, num must be below the default rule. */
1195 if (cmd == 0 || cmd == 2 || cmd == 5) {
1196 if (num >= IPFW_DEFAULT_RULE)
/* For set commands, num is an old set number. */
1199 if (num > RESVD_SET) /* old_set */
1203 /* Convert old requests into new representation */
1204 memset(&rt, 0, sizeof(rt));
1205 rt.start_rule = num;
1208 rt.new_set = new_set;
1212 case 0: /* delete rules numbered "rulenum" */
/* num == 0 means flush-all */
1214 rt.flags |= IPFW_RCFLAG_ALL;
1216 rt.flags |= IPFW_RCFLAG_RANGE;
1219 case 1: /* delete rules in set "rulenum" */
1220 rt.flags |= IPFW_RCFLAG_SET;
1223 case 5: /* delete rules "rulenum" and set "new_set" */
1224 rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
1229 case 2: /* move rules "rulenum" to set "new_set" */
1230 rt.flags |= IPFW_RCFLAG_RANGE;
1232 case 3: /* move rules from set "rulenum" to set "new_set" */
1233 IPFW_UH_WLOCK(chain);
1234 swap_sets(chain, &rt, 1);
1235 IPFW_UH_WUNLOCK(chain);
1237 case 4: /* swap sets "rulenum" and "new_set" */
1238 IPFW_UH_WLOCK(chain);
1239 swap_sets(chain, &rt, 0);
1240 IPFW_UH_WUNLOCK(chain);
/* Deletion path: report EINVAL when nothing matched a specific number. */
1247 if ((error = delete_range(chain, &rt, &ndel)) != 0)
1250 if (ndel == 0 && (cmd != 1 && num != 0))
1256 return (move_range(chain, &rt));
/*
 * Legacy counter/log reset entry point.  With rulenum == 0 clears every
 * rule (optionally restricted to one set); otherwise clears only the
 * rules with that exact number, failing with an error if none matched.
 * Logs a summary when logging is enabled.
 */
1260 * Reset some or all counters on firewall rules.
1261 * The argument `arg' is an u_int32_t. The low 16 bit are the rule number,
1262 * the next 8 bits are the set number, the top 8 bits are the command:
1263 * 0 work with rules from all set's;
1264 * 1 work with rules only from specified set.
1265 * Specified rule number is zero if we want to clear all entries.
1266 * log_only is 1 if we only want to reset logs, zero otherwise.
1269 zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
1275 uint16_t rulenum = arg & 0xffff;
1276 uint8_t set = (arg >> 16) & 0xff;
1277 uint8_t cmd = (arg >> 24) & 0xff;
1281 if (cmd == 1 && set > RESVD_SET)
1284 IPFW_UH_RLOCK(chain);
/* rulenum == 0: clear everything (subject to the set filter) */
1286 V_norule_counter = 0;
1287 for (i = 0; i < chain->n_rules; i++) {
1288 rule = chain->map[i];
1289 /* Skip rules not in our set. */
1290 if (cmd == 1 && rule->set != set)
1292 clear_counters(rule, log_only);
1294 msg = log_only ? "All logging counts reset" :
1295 "Accounting cleared";
/* Specific rule number: the map is sorted, so stop once past it. */
1298 for (i = 0; i < chain->n_rules; i++) {
1299 rule = chain->map[i];
1300 if (rule->rulenum == rulenum) {
1301 if (cmd == 0 || rule->set == set)
1302 clear_counters(rule, log_only);
1305 if (rule->rulenum > rulenum)
1308 if (!cleared) { /* we did not find any matching rules */
1309 IPFW_UH_RUNLOCK(chain);
1312 msg = log_only ? "logging count reset" : "cleared";
1314 IPFW_UH_RUNLOCK(chain);
1317 int lev = LOG_SECURITY | LOG_NOTICE;
1320 log(lev, "ipfw: Entry %d %s.\n", rulenum, msg);
1322 log(lev, "ipfw: %s.\n", msg);
/*
 * Header validation for incoming rules in v1 (check_ipfw_rule1) and v0
 * (check_ipfw_rule0) formats: minimum size, declared vs actual length,
 * action offset within the opcode stream, and rule number below the
 * default rule.  Both then validate opcodes via check_ipfw_rule_body().
 */
1329 * Check rule head in FreeBSD11 format
1333 check_ipfw_rule1(struct ip_fw_rule *rule, int size,
1334 struct rule_check_info *ci)
1338 if (size < sizeof(*rule)) {
1339 printf("ipfw: rule too short\n");
1343 /* Check for valid cmd_len */
1344 l = roundup2(RULESIZE(rule), sizeof(uint64_t));
1346 printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1349 if (rule->act_ofs >= rule->cmd_len) {
1350 printf("ipfw: bogus action offset (%u > %u)\n",
1351 rule->act_ofs, rule->cmd_len - 1);
1355 if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1358 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1362 * Check rule head in FreeBSD8 format
1366 check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
1367 struct rule_check_info *ci)
1371 if (size < sizeof(*rule)) {
1372 printf("ipfw: rule too short\n");
1376 /* Check for valid cmd_len */
/* cmd_len is 32-bit words; one word is already part of the header */
1377 l = sizeof(*rule) + rule->cmd_len * 4 - 4;
1379 printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1382 if (rule->act_ofs >= rule->cmd_len) {
1383 printf("ipfw: bogus action offset (%u > %u)\n",
1384 rule->act_ofs, rule->cmd_len - 1);
1388 if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1391 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
/*
 * Validates a rule's opcode stream: per-opcode size checks, argument
 * range checks (fib/table/set numbers), availability of optional
 * subsystems (divert, netgraph, nat), and action-placement rules.
 * Also counts table-referencing opcodes into ci->table_opcodes so the
 * caller can resolve them later.
 * NOTE(review): many case labels and the error-exit lines are missing
 * from this extract — the switch below is only partially visible.
 */
1395 check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
1403 * Now go for the individual checks. Very simple ones, basically only
1404 * instruction sizes.
1406 for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) {
1407 cmdlen = F_LEN(cmd);
1409 printf("ipfw: opcode %d size truncated\n",
1413 switch (cmd->opcode) {
1425 case O_IPPRECEDENCE:
1443 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1448 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1450 if (cmd->arg1 >= rt_numfibs) {
1451 printf("ipfw: invalid fib number %d\n",
1458 if (cmdlen != F_INSN_SIZE(ipfw_insn))
/* O_SETFIB allows the tablearg sentinel; mask off the high flag bit */
1460 if ((cmd->arg1 != IP_FW_TARG) &&
1461 ((cmd->arg1 & 0x7FFFF) >= rt_numfibs)) {
1462 printf("ipfw: invalid fib number %d\n",
1463 cmd->arg1 & 0x7FFFF);
1477 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1482 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
1487 if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
/* start with a full log budget */
1490 ((ipfw_insn_log *)cmd)->log_left =
1491 ((ipfw_insn_log *)cmd)->max_log;
1497 /* only odd command lengths */
1498 if ( !(cmdlen & 1) || cmdlen > 31)
1504 if (cmd->arg1 == 0 || cmd->arg1 > 256) {
1505 printf("ipfw: invalid set size %d\n",
1509 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1514 case O_IP_SRC_LOOKUP:
1515 case O_IP_DST_LOOKUP:
1516 if (cmd->arg1 >= V_fw_tables_max) {
1517 printf("ipfw: invalid table number %d\n",
1521 if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1522 cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 &&
1523 cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1525 ci->table_opcodes++;
1527 case O_IP_FLOW_LOOKUP:
1528 if (cmd->arg1 >= V_fw_tables_max) {
1529 printf("ipfw: invalid table number %d\n",
1533 if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1534 cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1536 ci->table_opcodes++;
1539 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
1550 if (cmdlen < 1 || cmdlen > 31)
1555 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1)
1561 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
1562 if (cmdlen < 2 || cmdlen > 31)
/* '\1' in the name marks an interface-table reference */
1569 if (((ipfw_insn_if *)cmd)->name[0] == '\1')
1570 ci->table_opcodes++;
1571 if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
1576 if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
1582 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1587 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
1592 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6))
/* Actions requiring optional kernel components must be loaded. */
1599 if (ip_divert_ptr == NULL)
1605 if (ng_ipfw_input_p == NULL)
1610 if (!IPFW_NAT_LOADED)
1612 if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
1615 case O_FORWARD_MAC: /* XXX not implemented yet */
1629 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1633 printf("ipfw: opcode %d, multiple actions"
1640 printf("ipfw: opcode %d, action must be"
1649 if (cmdlen != F_INSN_SIZE(struct in6_addr) +
1650 F_INSN_SIZE(ipfw_insn))
1655 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1656 ((ipfw_insn_u32 *)cmd)->o.arg1)
1660 case O_IP6_SRC_MASK:
1661 case O_IP6_DST_MASK:
1662 if ( !(cmdlen & 1) || cmdlen > 127)
1666 if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) )
/* Without INET6 every IPv6 opcode is rejected outright. */
1672 switch (cmd->opcode) {
1682 case O_IP6_SRC_MASK:
1683 case O_IP6_DST_MASK:
1685 printf("ipfw: no IPv6 support in kernel\n");
1686 return (EPROTONOSUPPORT);
1689 printf("ipfw: opcode %d, unknown opcode\n",
1695 if (have_action == 0) {
1696 printf("ipfw: missing action\n");
1702 printf("ipfw: opcode %d size %d wrong\n",
1703 cmd->opcode, cmdlen);
/*
 * FreeBSD 7.2/8 binary-compatibility shim: the old struct ip_fw layout
 * (pointers + inline counters) plus converters and a size macro used
 * when talking to legacy userland ipfw binaries.
 */
1709 * Translation of requests for compatibility with FreeBSD 7.2/8.
1710 * a static variable tells us if we have an old client from userland,
1711 * and if necessary we translate requests and responses between the
1717 struct ip_fw7 *next; /* linked list of rules */
1718 struct ip_fw7 *next_rule; /* ptr to next [skipto] rule */
1719 /* 'next_rule' is used to pass up 'set_disable' status */
1721 uint16_t act_ofs; /* offset of action in 32-bit units */
1722 uint16_t cmd_len; /* # of 32-bit words in cmd */
1723 uint16_t rulenum; /* rule number */
1724 uint8_t set; /* rule set (0..31) */
1725 // #define RESVD_SET 31 /* set for default and persistent rules */
1726 uint8_t _pad; /* padding */
1727 // uint32_t id; /* rule id, only in v.8 */
1728 /* These fields are present in all rules. */
1729 uint64_t pcnt; /* Packet counter */
1730 uint64_t bcnt; /* Byte counter */
1731 uint32_t timestamp; /* tv_sec of last match */
1733 ipfw_insn cmd[1]; /* storage for commands */
1736 static int convert_rule_to_7(struct ip_fw_rule0 *rule);
1737 static int convert_rule_to_8(struct ip_fw_rule0 *rule);
/* Total byte size of a v7 rule (cmd_len counts 32-bit words, one
 * already included in the struct). */
1740 #define RULESIZE7(rule) (sizeof(struct ip_fw7) + \
1741 ((struct ip_fw7 *)(rule))->cmd_len * 4 - 4)
1746 * Copy the static and dynamic rules to the supplied buffer
1747 * and return the amount of space actually used.
1748 * Must be run under IPFW_UH_RLOCK
/*
 * Legacy IP_FW_GET export path.  Emits either the 7.2 (ip_fw7) or the
 * 8.x (ip_fw_rule0) layout per rule, depending on the client version
 * detected elsewhere (selection logic elided in this view).
 */
1751 ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
1754 char *ep = bp + space;
1756 struct ip_fw_rule0 *dst;
1757 int error, i, l, warnflag;
1758 time_t boot_seconds;
/* Timestamps are stored relative to boot; add boottime for userland. */
1762 boot_seconds = boottime.tv_sec;
1763 for (i = 0; i < chain->n_rules; i++) {
1764 rule = chain->map[i];
1767 /* Convert rule to FreeBSD 7.2 format */
1768 l = RULESIZE7(rule);
1769 if (bp + l + sizeof(uint32_t) <= ep) {
1770 bcopy(rule, bp, l + sizeof(uint32_t));
1771 error = ipfw_rewrite_table_kidx(chain,
1772 (struct ip_fw_rule0 *)bp);
1775 error = convert_rule_to_7((struct ip_fw_rule0 *) bp);
1777 return 0; /*XXX correct? */
1779 * XXX HACK. Store the disable mask in the "next"
1780 * pointer in a wild attempt to keep the ABI the same.
1781 * Why do we do this on EVERY rule?
1783 bcopy(&V_set_disable,
1784 &(((struct ip_fw7 *)bp)->next_rule),
1785 sizeof(V_set_disable));
1786 if (((struct ip_fw7 *)bp)->timestamp)
1787 ((struct ip_fw7 *)bp)->timestamp += boot_seconds;
1790 continue; /* go to next rule */
/* Current (8.x) export format below. */
1793 l = RULEUSIZE0(rule);
1794 if (bp + l > ep) { /* should not happen */
1795 printf("overflow dumping static rules\n");
1798 dst = (struct ip_fw_rule0 *)bp;
1799 export_rule0(rule, dst, l);
1800 error = ipfw_rewrite_table_kidx(chain, dst);
1803 * XXX HACK. Store the disable mask in the "next"
1804 * pointer in a wild attempt to keep the ABI the same.
1805 * Why do we do this on EVERY rule?
1807 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask
1808 * so we need to fail _after_ saving at least one mask.
1810 bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable));
1812 dst->timestamp += boot_seconds;
1817 /* Non-fatal table rewrite error. */
1821 printf("Stop on rule %d. Fail to convert table\n",
1827 printf("ipfw: process %s is using legacy interfaces,"
1828 " consider rebuilding\n", "");
1829 ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */
/* Bytes actually written into @buf. */
1830 return (bp - (char *)buf);
/*
 * Per-request state for dump_config()/dump_static_rules(): rule index
 * range, precomputed sizes/counts and whether counters were requested.
 * NOTE(review): the struct's opening line is elided in this view.
 */
1835 uint32_t b; /* start rule */
1836 uint32_t e; /* end rule */
1837 uint32_t rcount; /* number of rules */
1838 uint32_t rsize; /* rules size */
1839 uint32_t tcount; /* number of tables */
1840 int rcounters; /* counters */
1844 * Dumps static rules with table TLVs in buffer @sd.
1846 * Returns 0 on success.
1849 dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da,
1850 uint32_t *bmask, struct sockopt_data *sd)
1855 ipfw_obj_ctlv *ctlv;
1856 struct ip_fw *krule;
1859 /* Dump table names first (if any) */
1860 if (da->tcount > 0) {
/* Header TLV describing the table-name list that follows. */
1862 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
1865 ctlv->head.type = IPFW_TLV_TBLNAME_LIST;
1866 ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) +
1868 ctlv->count = da->tcount;
1869 ctlv->objsize = sizeof(ipfw_obj_ntlv);
/* Walk @bmask and export one ntlv per referenced table. */
1873 tcount = da->tcount;
1874 while (tcount > 0) {
1875 if ((bmask[i / 32] & (1 << (i % 32))) == 0) {
1880 if ((error = ipfw_export_table_ntlv(chain, i, sd)) != 0)
/* Then the rule list TLV followed by the rules themselves. */
1888 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
1891 ctlv->head.type = IPFW_TLV_RULE_LIST;
1892 ctlv->head.length = da->rsize + sizeof(*ctlv);
1893 ctlv->count = da->rcount;
1895 for (i = da->b; i < da->e; i++) {
1896 krule = chain->map[i];
1898 l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv);
1899 if (da->rcounters != 0)
1900 l += sizeof(struct ip_fw_bcounter);
1901 dst = (caddr_t)ipfw_get_sopt_space(sd, l);
1905 export_rule1(krule, dst, l, da->rcounters);
1912 * Dumps requested objects data
1913 * Data layout (version 0)(current):
1914 * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags
1915 * size = ipfw_cfg_lheader.size
1916 * Reply: [ ipfw_rules_lheader
1917 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
1918 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST)
1919 * ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ]
1921 * [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional)
1923 * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize.
1924 * The rest (size, count) are set to zero and needs to be ignored.
1926 * Returns 0 on success.
1929 dump_config(struct ip_fw_chain *chain, struct sockopt_data *sd)
1931 ipfw_cfg_lheader *hdr;
1936 struct dump_args da;
1939 hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
1945 /* Allocate needed state */
1946 if (hdr->flags & IPFW_CFG_GET_STATIC)
1947 bmask = malloc(IPFW_TABLES_MAX / 8, M_TEMP, M_WAITOK | M_ZERO)
1949 IPFW_UH_RLOCK(chain);
1952 * STAGE 1: Determine size/count for objects in range.
1953 * Prepare used tables bitmask.
1956 memset(&da, 0, sizeof(da));
1959 da.e = chain->n_rules;
1961 if (hdr->end_rule != 0) {
1962 /* Handle custom range */
1963 if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
1964 rnum = IPFW_DEFAULT_RULE;
1965 da.b = ipfw_find_rule(chain, rnum, 0);
1966 rnum = hdr->end_rule;
1967 rnum = (rnum < IPFW_DEFAULT_RULE) ? rnum+1 : IPFW_DEFAULT_RULE;
1968 da.e = ipfw_find_rule(chain, rnum, 0);
1971 if (hdr->flags & IPFW_CFG_GET_STATIC) {
1972 for (i = da.b; i < da.e; i++) {
1973 rule = chain->map[i];
1974 da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
1976 da.tcount += ipfw_mark_table_kidx(chain, rule, bmask);
1978 /* Add counters if requested */
1979 if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
1980 da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
/* Total reply size: table ntlvs + rules + optional dynamic states. */
1985 sz += da.tcount * sizeof(ipfw_obj_ntlv) +
1986 sizeof(ipfw_obj_ctlv);
1987 sz += da.rsize + sizeof(ipfw_obj_ctlv);
1990 if (hdr->flags & IPFW_CFG_GET_STATES)
1991 sz += ipfw_dyn_get_count() * sizeof(ipfw_obj_dyntlv) +
1992 sizeof(ipfw_obj_ctlv);
1996 * Fill header anyway.
1997 * Note we have to save header fields to stable storage
1998 * buffer inside @sd can be flushed after dumping rules
2001 hdr->set_mask = ~V_set_disable;
2002 hdr_flags = hdr->flags;
/* Caller's buffer too small: return with only the header filled in. */
2005 if (sd->valsize < sz) {
2010 /* STAGE2: Store actual data */
2011 if (hdr_flags & IPFW_CFG_GET_STATIC) {
2012 error = dump_static_rules(chain, &da, bmask, sd);
2017 if (hdr_flags & IPFW_CFG_GET_STATES)
2018 error = ipfw_dump_states(chain, sd);
2021 IPFW_UH_RUNLOCK(chain);
2024 free(bmask, M_TEMP);
/* Payload length of an IP_FW3 request (bytes after the op header). */
2029 #define IP_FW3_OPLENGTH(x) ((x)->sopt_valsize - sizeof(ip_fw3_opheader))
2030 #define IP_FW3_WRITEBUF 4096 /* small page-size write buffer */
2031 #define IP_FW3_READBUF 16 * 1024 * 1024 /* handle large rulesets */
/*
 * Validates the name carried in a named-object TLV.
 * Currently only table names are checked; returns 0 if valid.
 */
2035 check_object_name(ipfw_obj_ntlv *ntlv)
2039 switch (ntlv->head.type) {
2040 case IPFW_TLV_TBL_NAME:
2041 error = ipfw_check_table_name(ntlv->name);
2051 * Adds one or more rules to ipfw @chain.
2052 * Data layout (version 0)(current):
2056 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1)
2057 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3)
2062 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2063 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ]
2066 * Rules in reply are modified to store their actual ruleset number.
2068 * (*1) TLVs inside IPFW_TLV_TBL_LIST needs to be sorted ascending
2069 * according to their idx field and there has to be no duplicates.
2070 * (*2) Numbered rules inside IPFW_TLV_RULE_LIST needs to be sorted ascending.
2071 * (*3) Each ip_fw structure needs to be aligned to u64 boundary.
2073 * Returns 0 on success.
2076 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2077 struct sockopt_data *sd)
2079 ipfw_obj_ctlv *ctlv, *rtlv, *tstate;
2080 ipfw_obj_ntlv *ntlv;
2081 int clen, error, idx;
2082 uint32_t count, read;
2083 struct ip_fw_rule *r;
2084 struct rule_check_info rci, *ci, *cbuf;
/* Reject requests larger than the contiguous read buffer limit. */
2087 if (sd->valsize > IP_FW3_READBUF)
2090 op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize);
2091 ctlv = (ipfw_obj_ctlv *)(op3 + 1);
2093 read = sizeof(ip_fw3_opheader);
2097 memset(&rci, 0, sizeof(struct rule_check_info));
2099 if (read + sizeof(*ctlv) > sd->valsize)
/* Optional leading TLV: list of table/object names used by the rules. */
2102 if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
2103 clen = ctlv->head.length;
2104 /* Check size and alignment */
2105 if (clen > sd->valsize || clen < sizeof(*ctlv))
2107 if ((clen % sizeof(uint64_t)) != 0)
2111 * Some table names or other named objects.
2112 * Check for validness.
2114 count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv);
2115 if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv))
2120 * Ensure TLVs are sorted ascending and
2121 * there are no duplicates.
2124 ntlv = (ipfw_obj_ntlv *)(ctlv + 1);
2126 if (ntlv->head.length != sizeof(ipfw_obj_ntlv))
2129 error = check_object_name(ntlv);
2133 if (ntlv->idx <= idx)
2142 read += ctlv->head.length;
2143 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2146 if (read + sizeof(*ctlv) > sd->valsize)
/* Mandatory TLV: the rule list itself. */
2149 if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
2150 clen = ctlv->head.length;
2151 if (clen + read > sd->valsize || clen < sizeof(*ctlv))
2153 if ((clen % sizeof(uint64_t)) != 0)
2157 * TODO: Permit adding multiple rules at once
2159 if (ctlv->count != 1)
2162 clen -= sizeof(*ctlv);
2164 if (ctlv->count > clen / sizeof(struct ip_fw_rule))
2167 /* Allocate state for each rule or use stack */
2168 if (ctlv->count == 1) {
2169 memset(&rci, 0, sizeof(struct rule_check_info));
2172 cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP,
2177 * Check each rule for validness.
2178 * Ensure numbered rules are sorted ascending
2179 * and properly aligned
2182 r = (struct ip_fw_rule *)(ctlv + 1);
2186 rsize = roundup2(RULESIZE(r), sizeof(uint64_t));
2187 if (rsize > clen || ctlv->count <= count) {
2193 error = check_ipfw_rule1(r, rsize, ci);
2198 if (r->rulenum != 0 && r->rulenum < idx) {
2199 printf("rulenum %d idx %d\n", r->rulenum, idx);
2205 ci->urule = (caddr_t)r;
2207 rsize = roundup2(rsize, sizeof(uint64_t));
2209 r = (struct ip_fw_rule *)((caddr_t)r + rsize);
2214 if (ctlv->count != count || error != 0) {
2221 read += ctlv->head.length;
2222 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2225 if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) {
2226 if (cbuf != NULL && cbuf != &rci)
2232 * Passed rules seem to be valid.
2233 * Allocate storage and try to add them to chain.
2235 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) {
2236 clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule);
2237 ci->krule = ipfw_alloc_rule(chain, clen);
2241 if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) {
2242 /* Free allocated krules */
2243 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++)
2244 free(ci->krule, M_IPFW);
2247 if (cbuf != NULL && cbuf != &rci)
2254 * Writes data accumulated in @sd to sockopt buffer.
2255 * Zeroes internal @sd buffer.
2258 ipfw_flush_sopt_data(struct sockopt_data *sd)
/* Nothing buffered: nothing to flush. */
2263 if ((sz = sd->koff) == 0)
2266 if (sd->sopt->sopt_dir == SOPT_GET) {
2267 error = sooptcopyout(sd->sopt, sd->kbuf, sz);
/* Reset kernel buffer and account for what was written so far. */
2272 memset(sd->kbuf, 0, sd->ksize);
2273 sd->ktotal += sd->koff;
2275 if (sd->ktotal + sd->ksize < sd->valsize)
2276 sd->kavail = sd->ksize;
2278 sd->kavail = sd->valsize - sd->ktotal;
2280 /* Update sopt buffer */
2281 sd->sopt->sopt_valsize = sd->kavail;
2282 sd->sopt->sopt_val = sd->sopt_val + sd->ktotal;
2288 * Ensures that @sd buffer has contiguous @needed number of
2291 * Returns pointer to requested space or NULL.
2294 ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed)
2299 if (sd->kavail < needed) {
2301 * Flush data and try another time.
2303 error = ipfw_flush_sopt_data(sd);
2305 if (sd->kavail < needed || error != 0)
/* Hand out the next @needed bytes of the kernel buffer. */
2309 addr = sd->kbuf + sd->koff;
2311 sd->kavail -= needed;
2316 * Requests @needed contiguous bytes from @sd buffer.
2317 * Function is used to notify subsystem that we are
2318 * interested in first @needed bytes (request header)
2319 * and the rest buffer can be safely zeroed.
2321 * Returns pointer to requested space or NULL.
2324 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed)
2328 if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL)
/* Zero everything past the header so stale data can't leak out. */
2332 memset(sd->kbuf + sd->koff, 0, sd->kavail);
2338 * New sockopt handler.
/*
 * Entry point for the IP_FW3 sockopt interface.  Decodes the op3 header,
 * sets up a (possibly sliding-window) kernel buffer in @sdata and
 * dispatches to the per-opcode handler.  Non-IP_FW3 options fall back to
 * the legacy ipfw_ctl().
 */
2341 ipfw_ctl3(struct sockopt *sopt)
2344 size_t bsize_max, size, valsize;
2345 struct ip_fw_chain *chain;
2348 struct sockopt_data sdata;
2349 ip_fw3_opheader *op3 = NULL;
2351 error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW);
2355 if (sopt->sopt_name != IP_FW3)
2356 return (ipfw_ctl(sopt));
2358 chain = &V_layer3_chain;
2361 /* Save original valsize before it is altered via sooptcopyin() */
2362 valsize = sopt->sopt_valsize;
2363 memset(&sdata, 0, sizeof(sdata));
2364 /* Read op3 header first to determine actual operation */
2365 op3 = (ip_fw3_opheader *)xbuf;
2366 error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3));
2370 sopt->sopt_valsize = valsize;
2373 * Determine opcode type/buffer size:
2374 * use on-stack xbuf for short request,
2375 * allocate sliding-window buf for data export or
2376 * contigious buffer for special ops.
2378 ctype = (sopt->sopt_dir == SOPT_GET) ? SOPT_GET : SOPT_SET;
2382 case IP_FW_TABLE_XADD:
2383 case IP_FW_TABLE_XDEL:
2385 bsize_max = IP_FW3_READBUF;
2388 bsize_max = IP_FW3_WRITEBUF;
2392 * Disallow modifications in really-really secure mode, but still allow
2393 * the logging counters to be reset.
2395 if (ctype == SOPT_SET && opt != IP_FW_XRESETLOG) {
2396 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
2402 * Fill in sockopt_data structure that may be useful for
2403 * IP_FW3 get requests.
2406 if (valsize <= sizeof(xbuf)) {
2408 sdata.ksize = sizeof(xbuf);
2409 sdata.kavail = valsize;
2411 if (valsize < bsize_max)
2416 sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
2418 sdata.kavail = size;
2422 sdata.sopt_val = sopt->sopt_val;
2423 sdata.valsize = valsize;
2426 * Copy either all request (if valsize < bsize_max)
2427 * or first bsize_max bytes to guarantee most consumers
2428 * that all necessary data has been copied).
2429 * Anyway, copy not less than sizeof(ip_fw3_opheader).
2431 if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize,
2432 sizeof(ip_fw3_opheader))) != 0)
2434 op3 = (ip_fw3_opheader *)sdata.kbuf;
/* Dispatch on the IP_FW3 sub-opcode. */
2439 error = dump_config(chain, &sdata);
2443 error = add_rules(chain, op3, &sdata);
2447 error = del_rules(chain, op3, &sdata);
2451 case IP_FW_XRESETLOG:
2452 error = clear_rules(chain, op3, &sdata);
2456 error = move_rules(chain, op3, &sdata);
2459 case IP_FW_SET_SWAP:
2460 case IP_FW_SET_MOVE:
2461 case IP_FW_SET_ENABLE:
2462 error = manage_sets(chain, op3, &sdata);
2466 error = ipfw_list_ifaces(chain, &sdata);
2469 /*--- TABLE opcodes ---*/
2470 case IP_FW_TABLE_XCREATE:
2471 error = ipfw_create_table(chain, op3, &sdata);
2474 case IP_FW_TABLE_XDESTROY:
2475 case IP_FW_TABLE_XFLUSH:
2476 error = ipfw_flush_table(chain, op3, &sdata);
2479 case IP_FW_TABLE_XMODIFY:
2480 error = ipfw_modify_table(chain, op3, &sdata);
2483 case IP_FW_TABLE_XINFO:
2484 error = ipfw_describe_table(chain, &sdata);
2487 case IP_FW_TABLES_XLIST:
2488 error = ipfw_list_tables(chain, &sdata);
2491 case IP_FW_TABLE_XLIST:
2492 error = ipfw_dump_table(chain, op3, &sdata);
2495 case IP_FW_TABLE_XADD:
2496 case IP_FW_TABLE_XDEL:
2497 error = ipfw_manage_table_ent(chain, op3, &sdata);
2500 case IP_FW_TABLE_XFIND:
2501 error = ipfw_find_table_entry(chain, op3, &sdata);
2504 case IP_FW_TABLE_XSWAP:
2505 error = ipfw_swap_table(chain, op3, &sdata);
2508 case IP_FW_TABLES_ALIST:
2509 error = ipfw_list_table_algo(chain, &sdata);
2512 case IP_FW_TABLE_XGETSIZE:
2517 if (IP_FW3_OPLENGTH(sopt) < sizeof(uint32_t)) {
2522 tbl = (uint32_t *)(op3 + 1);
2524 memset(&ti, 0, sizeof(ti));
2526 IPFW_UH_RLOCK(chain);
2527 error = ipfw_count_xtable(chain, &ti, tbl);
2528 IPFW_UH_RUNLOCK(chain);
2531 error = sooptcopyout(sopt, op3, sopt->sopt_valsize);
2536 printf("ipfw: ipfw_ctl3 invalid option %d\n", opt);
2540 /* Flush state and free buffers */
2542 error = ipfw_flush_sopt_data(&sdata);
2544 ipfw_flush_sopt_data(&sdata);
2546 /* Restore original pointer and set number of bytes written */
2547 sopt->sopt_val = sdata.sopt_val;
2548 sopt->sopt_valsize = sdata.ktotal;
2549 if (sdata.kbuf != xbuf)
2550 free(sdata.kbuf, M_TEMP);
2556 * {set|get}sockopt parser.
/*
 * Legacy (pre-IP_FW3) sockopt handler: IP_FW_GET/ADD/DEL, zero/resetlog
 * and the old table and NAT opcodes.  Kept for backward compatibility
 * with old userland binaries.
 */
2559 ipfw_ctl(struct sockopt *sopt)
2561 #define RULE_MAXSIZE (256*sizeof(u_int32_t))
2563 size_t size, valsize;
2565 struct ip_fw_rule0 *rule;
2566 struct ip_fw_chain *chain;
2567 u_int32_t rulenum[2];
2569 struct rule_check_info ci;
2571 chain = &V_layer3_chain;
2574 /* Save original valsize before it is altered via sooptcopyin() */
2575 valsize = sopt->sopt_valsize;
2576 opt = sopt->sopt_name;
2579 * Disallow modifications in really-really secure mode, but still allow
2580 * the logging counters to be reset.
2582 if (opt == IP_FW_ADD ||
2583 (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) {
2584 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
2592 * pass up a copy of the current rules. Static rules
2593 * come first (the last of which has number IPFW_DEFAULT_RULE),
2594 * followed by a possibly empty list of dynamic rule.
2595 * The last dynamic rule has NULL in the "next" field.
2597 * Note that the calculated size is used to bound the
2598 * amount of data returned to the user. The rule set may
2599 * change between calculating the size and returning the
2600 * data in which case we'll just return what fits.
2605 size = chain->static_len;
2606 size += ipfw_dyn_len();
2607 if (size >= sopt->sopt_valsize)
2609 buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
2610 IPFW_UH_RLOCK(chain);
2611 /* check again how much space we need */
2612 want = chain->static_len + ipfw_dyn_len();
2614 len = ipfw_getrules(chain, buf, size);
2615 IPFW_UH_RUNLOCK(chain);
2617 error = sooptcopyout(sopt, buf, len);
2625 /* locking is done within del_entry() */
2626 error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */
/* IP_FW_ADD: copy in one rule, validate, convert format and commit. */
2630 rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK);
2631 error = sooptcopyin(sopt, rule, RULE_MAXSIZE,
2632 sizeof(struct ip_fw7) );
2634 memset(&ci, 0, sizeof(struct rule_check_info));
2637 * If the size of commands equals RULESIZE7 then we assume
2638 * a FreeBSD7.2 binary is talking to us (set is7=1).
2639 * is7 is persistent so the next 'ipfw list' command
2640 * will use this format.
2641 * NOTE: If wrong version is guessed (this can happen if
2642 * the first ipfw command is 'ipfw [pipe] list')
2643 * the ipfw binary may crash or loop infinitely...
2645 size = sopt->sopt_valsize;
2646 if (size == RULESIZE7(rule)) {
2648 error = convert_rule_to_8(rule);
2653 size = RULESIZE(rule);
2657 error = check_ipfw_rule0(rule, size, &ci);
2659 /* locking is done within add_rule() */
2660 struct ip_fw *krule;
2661 krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule));
2662 ci.urule = (caddr_t)rule;
2665 error = commit_rules(chain, &ci, 1);
2666 if (!error && sopt->sopt_dir == SOPT_GET) {
2668 error = convert_rule_to_7(rule);
2669 size = RULESIZE7(rule);
2675 error = sooptcopyout(sopt, rule, size);
2683 * IP_FW_DEL is used for deleting single rules or sets,
2684 * and (ab)used to atomically manipulate sets. Argument size
2685 * is used to distinguish between the two:
2687 * delete single rule or set of rules,
2688 * or reassign rules (or sets) to a different set.
2689 * 2*sizeof(u_int32_t)
2690 * atomic disable/enable sets.
2691 * first u_int32_t contains sets to be disabled,
2692 * second u_int32_t contains sets to be enabled.
2694 error = sooptcopyin(sopt, rulenum,
2695 2*sizeof(u_int32_t), sizeof(u_int32_t));
2698 size = sopt->sopt_valsize;
2699 if (size == sizeof(u_int32_t) && rulenum[0] != 0) {
2700 /* delete or reassign, locking done in del_entry() */
2701 error = del_entry(chain, rulenum[0]);
2702 } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */
2703 IPFW_UH_WLOCK(chain);
2705 (V_set_disable | rulenum[0]) & ~rulenum[1] &
2706 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */
2707 IPFW_UH_WUNLOCK(chain);
2713 case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */
2715 if (sopt->sopt_val != 0) {
2716 error = sooptcopyin(sopt, rulenum,
2717 sizeof(u_int32_t), sizeof(u_int32_t));
2721 error = zero_entry(chain, rulenum[0],
2722 sopt->sopt_name == IP_FW_RESETLOG);
2725 /*--- TABLE opcodes ---*/
2726 case IP_FW_TABLE_ADD:
2727 case IP_FW_TABLE_DEL:
2729 ipfw_table_entry ent;
2730 struct tentry_info tei;
2733 error = sooptcopyin(sopt, &ent,
2734 sizeof(ent), sizeof(ent));
2738 memset(&tei, 0, sizeof(tei));
2739 tei.paddr = &ent.addr;
2740 tei.subtype = AF_INET;
2741 tei.masklen = ent.masklen;
2742 tei.value = ent.value;
2743 memset(&ti, 0, sizeof(ti));
2745 ti.type = IPFW_TABLE_CIDR;
2747 error = (opt == IP_FW_TABLE_ADD) ?
2748 add_table_entry(chain, &ti, &tei, 0, 1) :
2749 del_table_entry(chain, &ti, &tei, 0, 1);
2754 case IP_FW_TABLE_FLUSH:
2759 error = sooptcopyin(sopt, &tbl,
2760 sizeof(tbl), sizeof(tbl));
2763 memset(&ti, 0, sizeof(ti));
2765 error = flush_table(chain, &ti);
2769 case IP_FW_TABLE_GETSIZE:
2774 if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl),
2777 memset(&ti, 0, sizeof(ti));
2780 error = ipfw_count_table(chain, &ti, &cnt);
2781 IPFW_RUNLOCK(chain);
2784 error = sooptcopyout(sopt, &cnt, sizeof(cnt));
2788 case IP_FW_TABLE_LIST:
2793 if (sopt->sopt_valsize < sizeof(*tbl)) {
2797 size = sopt->sopt_valsize;
2798 tbl = malloc(size, M_TEMP, M_WAITOK);
2799 error = sooptcopyin(sopt, tbl, size, sizeof(*tbl));
2804 tbl->size = (size - sizeof(*tbl)) /
2805 sizeof(ipfw_table_entry);
2806 memset(&ti, 0, sizeof(ti));
2809 error = ipfw_dump_table_legacy(chain, &ti, tbl);
2810 IPFW_RUNLOCK(chain);
2815 error = sooptcopyout(sopt, tbl, size);
2820 /*--- NAT operations are protected by the IPFW_LOCK ---*/
2822 if (IPFW_NAT_LOADED)
2823 error = ipfw_nat_cfg_ptr(sopt);
2825 printf("IP_FW_NAT_CFG: %s\n",
2826 "ipfw_nat not present, please load it");
2832 if (IPFW_NAT_LOADED)
2833 error = ipfw_nat_del_ptr(sopt);
2835 printf("IP_FW_NAT_DEL: %s\n",
2836 "ipfw_nat not present, please load it");
2841 case IP_FW_NAT_GET_CONFIG:
2842 if (IPFW_NAT_LOADED)
2843 error = ipfw_nat_get_cfg_ptr(sopt);
2845 printf("IP_FW_NAT_GET_CFG: %s\n",
2846 "ipfw_nat not present, please load it");
2851 case IP_FW_NAT_GET_LOG:
2852 if (IPFW_NAT_LOADED)
2853 error = ipfw_nat_get_log_ptr(sopt);
2855 printf("IP_FW_NAT_GET_LOG: %s\n",
2856 "ipfw_nat not present, please load it");
2862 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name);
2869 #define RULE_MAXSIZE (256*sizeof(u_int32_t))
2871 /* Functions to convert rules 7.2 <==> 8.0 */
/*
 * Converts @rule (current format) in place into the FreeBSD 7.2 ip_fw7
 * layout.  Works on a heap copy so the target can be rewritten over the
 * original buffer.  Returns 0 on success, 1 on allocation failure.
 */
2873 convert_rule_to_7(struct ip_fw_rule0 *rule)
2875 /* Used to modify original rule */
2876 struct ip_fw7 *rule7 = (struct ip_fw7 *)rule;
2877 /* copy of original rule, version 8 */
2878 struct ip_fw_rule0 *tmp;
2880 /* Used to copy commands */
2881 ipfw_insn *ccmd, *dst;
2882 int ll = 0, ccmdlen = 0;
2884 tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
2886 return 1; //XXX error
2888 bcopy(rule, tmp, RULE_MAXSIZE);
/* Copy scalar header fields into the v7 layout. */
2891 //rule7->_pad = tmp->_pad;
2892 rule7->set = tmp->set;
2893 rule7->rulenum = tmp->rulenum;
2894 rule7->cmd_len = tmp->cmd_len;
2895 rule7->act_ofs = tmp->act_ofs;
2896 rule7->next_rule = (struct ip_fw7 *)tmp->next_rule;
2897 rule7->cmd_len = tmp->cmd_len;
2898 rule7->pcnt = tmp->pcnt;
2899 rule7->bcnt = tmp->bcnt;
2900 rule7->timestamp = tmp->timestamp;
/* Copy the instruction list, adjusting opcodes for the v7 numbering. */
2903 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ;
2904 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
2905 ccmdlen = F_LEN(ccmd);
2907 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
2909 if (dst->opcode > O_NAT)
2910 /* O_REASS doesn't exist in 7.2 version, so
2911 * decrement opcode if it is after O_REASS
2916 printf("ipfw: opcode %d size truncated\n",
/*
 * Converts @rule, currently holding a FreeBSD 7.2 ip_fw7 image, in place
 * into the current (8.x) format.  Inverse of convert_rule_to_7().
 * Returns 0 on success, 1 on allocation failure.
 */
2927 convert_rule_to_8(struct ip_fw_rule0 *rule)
2929 /* Used to modify original rule */
2930 struct ip_fw7 *rule7 = (struct ip_fw7 *) rule;
2932 /* Used to copy commands */
2933 ipfw_insn *ccmd, *dst;
2934 int ll = 0, ccmdlen = 0;
2936 /* Copy of original rule */
2937 struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
2939 return 1; //XXX error
2942 bcopy(rule7, tmp, RULE_MAXSIZE);
/* Copy instructions first, adjusting opcodes for the v8 numbering. */
2944 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ;
2945 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
2946 ccmdlen = F_LEN(ccmd);
2948 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
2950 if (dst->opcode > O_NAT)
2951 /* O_REASS doesn't exist in 7.2 version, so
2952 * increment opcode if it is after O_REASS
2957 printf("ipfw: opcode %d size truncated\n",
/* Then the scalar header fields. */
2963 rule->_pad = tmp->_pad;
2964 rule->set = tmp->set;
2965 rule->rulenum = tmp->rulenum;
2966 rule->cmd_len = tmp->cmd_len;
2967 rule->act_ofs = tmp->act_ofs;
2968 rule->next_rule = (struct ip_fw *)tmp->next_rule;
2969 rule->cmd_len = tmp->cmd_len;
2970 rule->id = 0; /* XXX see if is ok = 0 */
2971 rule->pcnt = tmp->pcnt;
2972 rule->bcnt = tmp->bcnt;
2973 rule->timestamp = tmp->timestamp;
2985 * Allocate new bitmask which can be used to enlarge/shrink
2986 * named instance index.
2989 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks)
/* Round the requested count up to whole u_long blocks. */
2995 items = roundup2(items, BLOCK_ITEMS); /* Align to block size */
2996 max_blocks = items / BLOCK_ITEMS;
2998 idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK);
2999 /* Mark all as free */
3000 memset(idx_mask, 0xFF, size * IPFW_MAX_SETS);
3001 *idx_mask &= ~(u_long)1; /* Skip index 0 */
3004 *pblocks = max_blocks;
3008 * Copy current bitmask index to new one.
3011 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks)
3013 int old_blocks, new_blocks;
3014 u_long *old_idx, *new_idx;
3017 old_idx = ni->idx_mask;
3018 old_blocks = ni->max_blocks;
3020 new_blocks = *blocks;
/* Copy each set's block run into the (larger) new bitmask. */
3022 for (i = 0; i < IPFW_MAX_SETS; i++) {
3023 memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i],
3024 old_blocks * sizeof(u_long));
3029 * Swaps current @ni index with new one.
3032 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks)
3037 old_idx = ni->idx_mask;
3038 old_blocks = ni->max_blocks;
/* Install the new bitmask... */
3040 ni->idx_mask = *idx;
3041 ni->max_blocks = *blocks;
3043 /* Save old values */
3045 *blocks = old_blocks;
/* Releases a bitmask previously allocated by ipfw_objhash_bitmap_alloc(). */
3049 ipfw_objhash_bitmap_free(void *idx, int blocks)
3056 * Creates named hash instance.
3057 * Must be called without holding any locks.
3058 * Return pointer to new instance.
3060 struct namedobj_instance *
3061 ipfw_objhash_create(uint32_t items)
3063 struct namedobj_instance *ni;
/* One allocation: instance header + name buckets + value buckets. */
3067 size = sizeof(struct namedobj_instance) +
3068 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE +
3069 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE;
3071 ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO);
3072 ni->nn_size = NAMEDOBJ_HASH_SIZE;
3073 ni->nv_size = NAMEDOBJ_HASH_SIZE;
3075 ni->names = (struct namedobjects_head *)(ni +1);
3076 ni->values = &ni->names[ni->nn_size];
3078 for (i = 0; i < ni->nn_size; i++)
3079 TAILQ_INIT(&ni->names[i]);
3081 for (i = 0; i < ni->nv_size; i++)
3082 TAILQ_INIT(&ni->values[i]);
3084 /* Set default hashing/comparison functions */
3085 ni->hash_f = objhash_hash_name;
3086 ni->cmp_f = objhash_cmp_name;
3088 /* Allocate bitmask separately due to possible resize */
3089 ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks);
/* Frees a namedobj instance and its index bitmask. */
3095 ipfw_objhash_destroy(struct namedobj_instance *ni)
3098 free(ni->idx_mask, M_IPFW);
/* Overrides the default name-hash and name-compare callbacks of @ni. */
3103 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f,
3104 objhash_cmp_f *cmp_f)
3107 ni->hash_f = hash_f;
/* Default name hash: FNV-1 32-bit over the string, reduced to a bucket. */
3112 objhash_hash_name(struct namedobj_instance *ni, void *name, uint32_t set)
3116 v = fnv_32_str((char *)name, FNV1_32_INIT);
3118 return (v % ni->nn_size);
/* Default comparator: match on both name string and set number. */
3122 objhash_cmp_name(struct named_object *no, void *name, uint32_t set)
3125 if ((strcmp(no->name, (char *)name) == 0) && (no->set == set))
/* Bucket for a kernel index in the value hash. */
3132 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val)
3136 v = val % (ni->nv_size - 1);
/* Finds the object with given @name in @set, or NULL if absent. */
3141 struct named_object *
3142 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name)
3144 struct named_object *no;
3147 hash = ni->hash_f(ni, name, set);
3149 TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
3150 if (ni->cmp_f(no, name, set) == 0)
/* Finds the object with kernel index @kidx, or NULL if absent. */
3157 struct named_object *
3158 ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx)
3160 struct named_object *no;
3163 hash = objhash_hash_idx(ni, kidx);
3165 TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
3166 if (no->kidx == kidx)
/* Tests whether @a and @b share both name and set. */
3174 ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
3175 struct named_object *b)
3178 if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
/* Links @no into both the name hash and the kidx (value) hash. */
3185 ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
3189 hash = ni->hash_f(ni, no->name, no->set);
3190 TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);
3192 hash = objhash_hash_idx(ni, no->kidx);
3193 TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);
/* Unlinks @no from both the name hash and the kidx (value) hash. */
3199 ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
3203 hash = ni->hash_f(ni, no->name, no->set);
3204 TAILQ_REMOVE(&ni->names[hash], no, nn_next);
3206 hash = objhash_hash_idx(ni, no->kidx);
3207 TAILQ_REMOVE(&ni->values[hash], no, nv_next);
/* Returns the number of objects stored in @ni (body elided in this view). */
3213 ipfw_objhash_count(struct namedobj_instance *ni)
3220 * Runs @func for each found named object.
3221 * It is safe to delete objects from callback
3224 ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg)
3226 struct named_object *no, *no_tmp;
/* _SAFE variant permits the callback to unlink the current object. */
3229 for (i = 0; i < ni->nn_size; i++) {
3230 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp)
3236 * Removes index from given set.
3237 * Returns 0 on success.
3240 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx)
/* Locate the bitmask block and bit for @idx. */
3245 i = idx / BLOCK_ITEMS;
3246 v = idx % BLOCK_ITEMS;
3248 if (i >= ni->max_blocks)
3251 mask = &ni->idx_mask[i];
/* Set bit means "free": freeing an already-free index is an error. */
3253 if ((*mask & ((u_long)1 << v)) != 0)
3257 *mask |= (u_long)1 << v;
3259 /* Update free offset */
3260 if (ni->free_off[0] > i)
3261 ni->free_off[0] = i;
3267 * Allocate new index in given instance and stores in in @pidx.
3268 * Returns 0 on success.
3271 ipfw_objhash_alloc_idx(void *n, uint16_t *pidx)
3273 struct namedobj_instance *ni;
3277 ni = (struct namedobj_instance *)n;
3279 off = ni->free_off[0];
3280 mask = &ni->idx_mask[off];
3282 for (i = off; i < ni->max_blocks; i++, mask++) {
3283 if ((v = ffsl(*mask)) == 0)
3287 *mask &= ~ ((u_long)1 << (v - 1));
3289 ni->free_off[0] = i;
3291 v = BLOCK_ITEMS * i + v - 1;