/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/kernel.h>

/* Local definitions of errno and rte_errno. */
int errno = 0, rte_errno = 0;
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
    .name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
    INVALID = 0,
    VALID
};
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
    if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))            \
        rte_panic("LPM: Invalid depth (%u) at line %d",         \
                (unsigned)(depth), __LINE__);                   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)  : range = 1 - 32
 * mask   (OUT) : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
    VERIFY_DEPTH(depth);

    /* To calculate a mask start with a 1 in the most significant bit and
     * arithmetic-right-shift it so the vacated positions fill with 1's.
     */
    return (int)0x80000000 >> (depth - 1);
}
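
/*
 * Worked examples (informal, for illustration only):
 *   depth_to_mask(1)  == 0x80000000   -- only the top bit set
 *   depth_to_mask(24) == 0xFFFFFF00   -- a /24 netmask
 *   depth_to_mask(32) == 0xFFFFFFFF   -- full host match
 * The arithmetic shift of the signed constant is what propagates the 1's.
 */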
/*
 * Converts given depth value to its corresponding range value.
 */
static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
    VERIFY_DEPTH(depth);

    /*
     * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
     */
    if (depth <= MAX_DEPTH_TBL24)
        return 1 << (MAX_DEPTH_TBL24 - depth);

    /* Else if depth is greater than 24 calculate the tbl8 range. */
    return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
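
/*
 * Illustration (informal): the range is the number of table entries a prefix
 * of the given depth spans.
 *   depth_to_range(16) == 1 << (24 - 16) == 256 tbl24 entries
 *   depth_to_range(24) == 1              -- exactly one tbl24 entry
 *   depth_to_range(28) == 1 << (32 - 28) == 16 tbl8 entries
 */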
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
    struct rte_lpm *l = NULL;
    struct rte_tailq_entry *te;
    struct rte_lpm_list *lpm_list;

    lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

    rte_mcfg_tailq_read_lock();
    TAILQ_FOREACH(te, lpm_list, next) {
        l = te->data;
        if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
            break;
    }
    rte_mcfg_tailq_read_unlock();

    if (te == NULL) {
        rte_errno = ENOENT;
        return NULL;
    }
    return l;
}
/*
 * Allocates memory for an LPM object.
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
        const struct rte_lpm_config *config)
{
    char mem_name[RTE_LPM_NAMESIZE];
    struct rte_lpm *lpm = NULL;
    struct rte_tailq_entry *te;
    uint32_t mem_size, rules_size, tbl8s_size;
    struct rte_lpm_list *lpm_list;

    lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
    RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

    /* Check user arguments. */
    if ((name == NULL) || (socket_id < -1)
            || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
        rte_errno = EINVAL;
        return NULL;
    }

    snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

    /* Determine the amount of memory to allocate. */
    mem_size = sizeof(*lpm);
    rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
    tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
            RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
    rte_mcfg_tailq_write_lock();

    /* Guarantee that there is no existing table with this name. */
    TAILQ_FOREACH(te, lpm_list, next) {
        lpm = te->data;
        if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
            break;
    }
    if (te != NULL) {
        lpm = NULL;
        rte_errno = EEXIST;
        goto exit;
    }

    /* Allocate a tailq entry. */
    te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
    if (te == NULL) {
        RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
        rte_errno = ENOMEM;
        goto exit;
    }
    /* Allocate memory to store the LPM data structures. */
    lpm = rte_zmalloc_socket(mem_name, mem_size,
            RTE_CACHE_LINE_SIZE, socket_id);
    if (lpm == NULL) {
        RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
        rte_free(te);
        rte_errno = ENOMEM;
        goto exit;
    }

    lpm->rules_tbl = rte_zmalloc_socket(NULL,
            (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
    if (lpm->rules_tbl == NULL) {
        RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
        rte_free(lpm);
        lpm = NULL;
        rte_free(te);
        rte_errno = ENOMEM;
        goto exit;
    }

    lpm->tbl8 = rte_zmalloc_socket(NULL,
            (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
    if (lpm->tbl8 == NULL) {
        RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
        rte_free(lpm->rules_tbl);
        rte_free(lpm);
        lpm = NULL;
        rte_free(te);
        rte_errno = ENOMEM;
        goto exit;
    }
    /* Save user arguments. */
    lpm->max_rules = config->max_rules;
    lpm->number_tbl8s = config->number_tbl8s;
    strlcpy(lpm->name, name, sizeof(lpm->name));

    te->data = lpm;

    //TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
    rte_mcfg_tailq_write_unlock();

    return lpm;
}
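
/*
 * Example (informal sketch, illustrative values only) of creating and
 * releasing a table:
 *
 *    struct rte_lpm_config config = {
 *        .max_rules = 1024,
 *        .number_tbl8s = 256,
 *        .flags = 0,
 *    };
 *    struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *            &config);
 *    if (lpm == NULL)
 *        return -rte_errno;
 *    ...
 *    rte_lpm_free(lpm);
 */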
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
    struct rte_lpm_list *lpm_list;
    struct rte_tailq_entry *te;

    /* Check user arguments. */
    if (lpm == NULL)
        return;

    lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

    rte_mcfg_tailq_write_lock();

    /* Find our tailq entry. */
    TAILQ_FOREACH(te, lpm_list, next) {
        if (te->data == (void *)lpm)
            break;
    }
    if (te != NULL)
        TAILQ_REMOVE(lpm_list, te, next);

    rte_mcfg_tailq_write_unlock();

    rte_free(lpm->tbl8);
    rte_free(lpm->rules_tbl);
    rte_free(lpm);
    rte_free(te);
}
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the code below (depth - 1) is used as
 * the group index because, although the depth range is 1 - 32, the groups are
 * indexed from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
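
/*
 * Illustration (informal, made-up numbers): rule_info[] records, per depth
 * group, the index of the group's first rule and how many rules it holds,
 * and the groups are stored back to back in rules_tbl[] in depth order, e.g.
 *
 *    rule_info[23].first_rule = 0, .used_rules = 3  -> three /24 rules in
 *                                                      rules_tbl[0..2]
 *    rule_info[31].first_rule = 3, .used_rules = 1  -> one /32 rule in
 *                                                      rules_tbl[3]
 */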
static int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
    uint32_t next_hop)
{
    uint32_t rule_gindex, rule_index, last_rule;
    int i;

    VERIFY_DEPTH(depth);

    /* Scan through rule group to see if rule already exists. */
    if (lpm->rule_info[depth - 1].used_rules > 0) {

        /* rule_gindex stands for rule group index. */
        rule_gindex = lpm->rule_info[depth - 1].first_rule;
        /* Initialise rule_index to point to start of rule group. */
        rule_index = rule_gindex;
        /* Last rule = last used rule in this rule group. */
        last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

        for (; rule_index < last_rule; rule_index++) {

            /* If rule already exists update next hop and return. */
            if (lpm->rules_tbl[rule_index].ip == ip_masked) {

                if (lpm->rules_tbl[rule_index].next_hop
                        == next_hop)
                    return -EEXIST;
                lpm->rules_tbl[rule_index].next_hop = next_hop;

                return rule_index;
            }
        }

        if (rule_index == lpm->max_rules)
            return -ENOSPC;
    } else {
        /* Calculate the position in which the rule will be stored. */
        rule_index = 0;

        for (i = depth - 1; i > 0; i--) {
            if (lpm->rule_info[i - 1].used_rules > 0) {
                rule_index = lpm->rule_info[i - 1].first_rule
                        + lpm->rule_info[i - 1].used_rules;
                break;
            }
        }
        if (rule_index == lpm->max_rules)
            return -ENOSPC;

        lpm->rule_info[depth - 1].first_rule = rule_index;
    }

    /* Make room for the new rule in the array. */
    for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
        if (lpm->rule_info[i - 1].first_rule
                + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
            return -ENOSPC;

        if (lpm->rule_info[i - 1].used_rules > 0) {
            lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
                + lpm->rule_info[i - 1].used_rules]
                    = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
            lpm->rule_info[i - 1].first_rule++;
        }
    }

    /* Add the new rule. */
    lpm->rules_tbl[rule_index].ip = ip_masked;
    lpm->rules_tbl[rule_index].next_hop = next_hop;

    /* Increment the used rules counter for this rule group. */
    lpm->rule_info[depth - 1].used_rules++;

    return rule_index;
}
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
    int i;

    VERIFY_DEPTH(depth);

    lpm->rules_tbl[rule_index] =
            lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
            + lpm->rule_info[depth - 1].used_rules - 1];

    for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
        if (lpm->rule_info[i].used_rules > 0) {
            lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
                lpm->rules_tbl[lpm->rule_info[i].first_rule
                    + lpm->rule_info[i].used_rules - 1];
            lpm->rule_info[i].first_rule--;
        }
    }

    lpm->rule_info[depth - 1].used_rules--;
}
/*
 * Finds a rule in the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
    uint32_t rule_gindex, last_rule, rule_index;

    VERIFY_DEPTH(depth);

    rule_gindex = lpm->rule_info[depth - 1].first_rule;
    last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

    /* Scan used rules at given depth to find rule. */
    for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
        /* If rule is found return the rule index. */
        if (lpm->rules_tbl[rule_index].ip == ip_masked)
            return rule_index;
    }

    /* If rule is not found return -EINVAL. */
    return -EINVAL;
}
/*
 * Find, clean and allocate a tbl8.
 */
static int32_t
tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
    uint32_t group_idx; /* tbl8 group index. */
    struct rte_lpm_tbl_entry *tbl8_entry;

    /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
    for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
        tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
        /* If a free tbl8 group is found clean it and set it as VALID. */
        if (!tbl8_entry->valid_group) {
            struct rte_lpm_tbl_entry new_tbl8_entry = {
                .next_hop = 0,
                .valid = INVALID,
                .depth = 0,
                .valid_group = VALID,
            };

            memset(&tbl8_entry[0], 0,
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                    sizeof(tbl8_entry[0]));

            __atomic_store(tbl8_entry, &new_tbl8_entry,
                    __ATOMIC_RELAXED);

            /* Return group index for allocated tbl8 group. */
            return group_idx;
        }
    }

    /* If there are no free tbl8 groups then return an error. */
    return -ENOSPC;
}
static void
tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
    /* Set the tbl8 group invalid by clearing its first entry. */
    struct rte_lpm_tbl_entry zero_tbl8_entry = {0};

    __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
            __ATOMIC_RELAXED);
}
static __rte_noinline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint32_t next_hop)
{
/* In extended entries the next_hop field holds the tbl8 group index;
 * the alias below makes that explicit.
 */
#define group_idx next_hop
    uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

    /* Calculate the index into Table24. */
    tbl24_index = ip >> 8;
    tbl24_range = depth_to_range(depth);

    for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
        /*
         * For invalid OR valid and non-extended tbl24 entries set the
         * entry.
         */
        if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
                lpm->tbl24[i].depth <= depth)) {

            struct rte_lpm_tbl_entry new_tbl24_entry = {
                .next_hop = next_hop,
                .valid = VALID,
                .valid_group = 0,
                .depth = depth,
            };

            /* Setting tbl24 entry in one go to avoid race
             * conditions.
             */
            __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
                    __ATOMIC_RELEASE);

            continue;
        }

        if (lpm->tbl24[i].valid_group == 1) {
            /* If tbl24 entry is valid and extended calculate the
             * index into tbl8.
             */
            tbl8_index = lpm->tbl24[i].group_idx *
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
            tbl8_group_end = tbl8_index +
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

            for (j = tbl8_index; j < tbl8_group_end; j++) {
                if (!lpm->tbl8[j].valid ||
                        lpm->tbl8[j].depth <= depth) {
                    struct rte_lpm_tbl_entry
                        new_tbl8_entry = {
                        .valid = VALID,
                        .valid_group = VALID,
                        .depth = depth,
                        .next_hop = next_hop,
                    };

                    /*
                     * Setting tbl8 entry in one go to avoid
                     * race conditions.
                     */
                    __atomic_store(&lpm->tbl8[j],
                            &new_tbl8_entry,
                            __ATOMIC_RELAXED);
                }
            }
        }
    }
#undef group_idx
    return 0;
}
static __rte_noinline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
        uint32_t next_hop)
{
#define group_idx next_hop
    uint32_t tbl24_index;
    int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
        tbl8_range, i;

    tbl24_index = (ip_masked >> 8);
    tbl8_range = depth_to_range(depth);

    if (!lpm->tbl24[tbl24_index].valid) {
        /* Search for a free tbl8 group. */
        tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

        /* Check tbl8 allocation was successful. */
        if (tbl8_group_index < 0) {
            return tbl8_group_index;
        }

        /* Find index into tbl8 and range. */
        tbl8_index = (tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
                (ip_masked & 0xFF);

        /* Set tbl8 entries. */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            struct rte_lpm_tbl_entry new_tbl8_entry = {
                .valid = VALID,
                .depth = depth,
                .valid_group = lpm->tbl8[i].valid_group,
                .next_hop = next_hop,
            };
            __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
                    __ATOMIC_RELAXED);
        }

        /*
         * Update tbl24 entry to point to new tbl8 entry. Note: the
         * ext_flag and tbl8_index need to be updated simultaneously,
         * so assign the whole structure in one go.
         */
        struct rte_lpm_tbl_entry new_tbl24_entry = {
            .group_idx = tbl8_group_index,
            .valid = VALID,
            .valid_group = 1,
            .depth = 0,
        };

        /* The tbl24 entry must be written only after the
         * tbl8 entries are written.
         */
        __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
                __ATOMIC_RELEASE);
    } /* If valid entry but not extended calculate the index into Table8. */
    else if (lpm->tbl24[tbl24_index].valid_group == 0) {
        /* Search for a free tbl8 group. */
        tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

        if (tbl8_group_index < 0) {
            return tbl8_group_index;
        }

        tbl8_group_start = tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_group_end = tbl8_group_start +
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

        /* Populate the new tbl8 with the tbl24 value. */
        for (i = tbl8_group_start; i < tbl8_group_end; i++) {
            struct rte_lpm_tbl_entry new_tbl8_entry = {
                .valid = VALID,
                .depth = lpm->tbl24[tbl24_index].depth,
                .valid_group = lpm->tbl8[i].valid_group,
                .next_hop = lpm->tbl24[tbl24_index].next_hop,
            };
            __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
                    __ATOMIC_RELAXED);
        }

        tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

        /* Insert the new rule into the tbl8 entries. */
        for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
            struct rte_lpm_tbl_entry new_tbl8_entry = {
                .valid = VALID,
                .depth = depth,
                .valid_group = lpm->tbl8[i].valid_group,
                .next_hop = next_hop,
            };
            __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
                    __ATOMIC_RELAXED);
        }

        /*
         * Update tbl24 entry to point to new tbl8 entry. Note: the
         * ext_flag and tbl8_index need to be updated simultaneously,
         * so assign the whole structure in one go.
         */
        struct rte_lpm_tbl_entry new_tbl24_entry = {
            .group_idx = tbl8_group_index,
            .valid = VALID,
            .valid_group = 1,
            .depth = 0,
        };

        /* The tbl24 entry must be written only after the
         * tbl8 entries are written.
         */
        __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
                __ATOMIC_RELEASE);
    } else { /*
         * If it is a valid, extended entry calculate the index into tbl8.
         */
        tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
        tbl8_group_start = tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

            if (!lpm->tbl8[i].valid ||
                    lpm->tbl8[i].depth <= depth) {
                struct rte_lpm_tbl_entry new_tbl8_entry = {
                    .valid = VALID,
                    .depth = depth,
                    .next_hop = next_hop,
                    .valid_group = lpm->tbl8[i].valid_group,
                };

                /*
                 * Setting tbl8 entry in one go to avoid race
                 * conditions.
                 */
                __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
                        __ATOMIC_RELAXED);
            }
        }
    }
#undef group_idx
    return 0;
}
/*
 * Add a rule to the table.
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint32_t next_hop)
{
    int32_t rule_index, status = 0;
    uint32_t ip_masked;

    /* Check user arguments. */
    if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
        return -EINVAL;

    ip_masked = ip & depth_to_mask(depth);

    /* Add the rule to the rule table. */
    rule_index = rule_add(lpm, ip_masked, depth, next_hop);

    /* Skip the table entries update if the rule is identical to
     * the one already in the rules table.
     */
    if (rule_index == -EEXIST)
        return 0;

    /* If there is no space available for the new rule return an error. */
    if (rule_index < 0) {
        return rule_index;
    }

    if (depth <= MAX_DEPTH_TBL24) {
        status = add_depth_small(lpm, ip_masked, depth, next_hop);
    } else { /* If depth > MAX_DEPTH_TBL24 */
        status = add_depth_big(lpm, ip_masked, depth, next_hop);

        /*
         * If the add fails due to exhaustion of tbl8 extensions delete
         * the rule that was added to the rule table.
         */
        if (status < 0) {
            //rule_delete(lpm, rule_index, depth);

            return status;
        }
    }

    return 0;
}
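
/*
 * Example (informal sketch, illustrative values only): adding a route and
 * checking it through the rules table.
 *
 *    uint32_t ip = RTE_IPV4(192, 168, 1, 0);
 *    uint32_t nh = 0;
 *
 *    if (rte_lpm_add(lpm, ip, 24, 5) == 0 &&
 *            rte_lpm_is_rule_present(lpm, ip, 24, &nh) == 1)
 *        printf("next hop %u\n", nh);    (prints "next hop 5")
 */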
/*
 * Look for a rule in the high-level rules table.
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint32_t *next_hop)
{
    uint32_t ip_masked;
    int32_t rule_index;

    /* Check user arguments. */
    if ((lpm == NULL) ||
        (next_hop == NULL) ||
        (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
        return -EINVAL;

    /* Look for the rule using rule_find. */
    ip_masked = ip & depth_to_mask(depth);
    rule_index = rule_find(lpm, ip_masked, depth);

    if (rule_index >= 0) {
        *next_hop = lpm->rules_tbl[rule_index].next_hop;
        return 1;
    }

    /* If the rule is not found return 0. */
    return 0;
}
static int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint8_t *sub_rule_depth)
{
    int32_t rule_index;
    uint32_t ip_masked;
    uint8_t prev_depth;

    /* Scan shallower depths for the longest covering rule. */
    for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
        ip_masked = ip & depth_to_mask(prev_depth);

        rule_index = rule_find(lpm, ip_masked, prev_depth);

        if (rule_index >= 0) {
            *sub_rule_depth = prev_depth;
            return rule_index;
        }
    }

    return -1;
}
static int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
    uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
#define group_idx next_hop
    uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

    /* Calculate the range and index into Table24. */
    tbl24_range = depth_to_range(depth);
    tbl24_index = (ip_masked >> 8);
    struct rte_lpm_tbl_entry zero_tbl24_entry = {0};

    /*
     * First check sub_rule_nhop. A value of 0 indicates that there is no
     * replacement rule; a non-zero value is the next hop of the
     * replacement rule.
     */
    if (sub_rule_nhop == 0) {
        /*
         * If no replacement rule exists then invalidate entries
         * associated with this rule.
         */
        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

            if (lpm->tbl24[i].valid_group == 0 &&
                    lpm->tbl24[i].depth <= depth) {
                __atomic_store(&lpm->tbl24[i],
                    &zero_tbl24_entry, __ATOMIC_RELEASE);
            } else if (lpm->tbl24[i].valid_group == 1) {
                /*
                 * If the TBL24 entry is extended, then there has
                 * to be a rule with depth >= 25 in the
                 * associated TBL8 group.
                 */
                tbl8_group_index = lpm->tbl24[i].group_idx;
                tbl8_index = tbl8_group_index *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                for (j = tbl8_index; j < (tbl8_index +
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                    if (lpm->tbl8[j].depth <= depth)
                        lpm->tbl8[j].valid = INVALID;
                }
            }
        }
    } else {
        /*
         * If a replacement rule exists then modify entries
         * associated with this rule.
         */
        struct rte_lpm_tbl_entry new_tbl24_entry = {
            .next_hop = sub_rule_nhop,
            .valid = VALID,
            .valid_group = 0,
            .depth = sub_rule_depth,
        };

        struct rte_lpm_tbl_entry new_tbl8_entry = {
            .valid = VALID,
            .valid_group = VALID,
            .depth = sub_rule_depth,
            .next_hop = sub_rule_nhop,
        };

        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

            if (lpm->tbl24[i].valid_group == 0 &&
                    lpm->tbl24[i].depth <= depth) {
                __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
                        __ATOMIC_RELEASE);
            } else if (lpm->tbl24[i].valid_group == 1) {
                /*
                 * If the TBL24 entry is extended, then there has
                 * to be a rule with depth >= 25 in the
                 * associated TBL8 group.
                 */
                tbl8_group_index = lpm->tbl24[i].group_idx;
                tbl8_index = tbl8_group_index *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                for (j = tbl8_index; j < (tbl8_index +
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                    if (lpm->tbl8[j].depth <= depth)
                        __atomic_store(&lpm->tbl8[j],
                            &new_tbl8_entry,
                            __ATOMIC_RELAXED);
                }
            }
        }
    }
#undef group_idx
    return 0;
}
/*
 * Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
        uint32_t tbl8_group_start)
{
    uint32_t tbl8_group_end, i;
    tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

    /*
     * Check the first entry of the given tbl8 group. If it is invalid the
     * group cannot hold a rule with depth <= MAX_DEPTH_TBL24 (such a rule
     * would cover every entry in the group), so the group can only be
     * recycled if all of its entries are invalid, which is checked below.
     */
    if (tbl8[tbl8_group_start].valid) {
        /*
         * If the first entry is valid check if its depth is at most 24
         * and if so check the rest of the entries to verify that they
         * are all of this depth.
         */
        if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
            for (i = (tbl8_group_start + 1); i < tbl8_group_end;
                    i++) {

                if (tbl8[i].depth !=
                        tbl8[tbl8_group_start].depth) {

                    return -EEXIST;
                }
            }
            /* If all entries are the same return the tbl8 group
             * start index.
             */
            return tbl8_group_start;
        }

        return -EEXIST;
    }

    /*
     * If the first entry is invalid check if the rest of the entries in
     * the tbl8 group are invalid.
     */
    for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
        if (tbl8[i].valid)
            return -EEXIST;
    }
    /* If no valid entries are found then return -EINVAL. */
    return -EINVAL;
}
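
/*
 * Illustration (informal): after deleting the last /28 from a group that
 * still holds a pushed-down /20 covering all 256 entries, every entry shares
 * the /20's depth, so tbl8_recycle_check() returns the group start index and
 * the caller folds that value back into the parent tbl24 entry; if instead
 * no entry is left valid it returns -EINVAL and the group is freed outright.
 */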
static int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
    uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
#define group_idx next_hop
    uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
            tbl8_range, i;
    int32_t tbl8_recycle_index;

    /*
     * Calculate the index into tbl24 and range. Note: All depths larger
     * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
     */
    tbl24_index = ip_masked >> 8;

    /* Calculate the index into tbl8 and range. */
    tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
    tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
    tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
    tbl8_range = depth_to_range(depth);

    if (sub_rule_nhop == 0) {
        /*
         * Loop through the range of entries on tbl8 for which the
         * rule_to_delete must be removed or modified.
         */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            if (lpm->tbl8[i].depth <= depth)
                lpm->tbl8[i].valid = INVALID;
        }
    } else {
        /* Set the new tbl8 entry. */
        struct rte_lpm_tbl_entry new_tbl8_entry = {
            .valid = VALID,
            .depth = sub_rule_depth,
            .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
            .next_hop = sub_rule_nhop,
        };

        /*
         * Loop through the range of entries on tbl8 for which the
         * rule_to_delete must be modified.
         */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            if (lpm->tbl8[i].depth <= depth)
                __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
                        __ATOMIC_RELAXED);
        }
    }

    /*
     * Check if there are any valid entries in this tbl8 group. If all
     * tbl8 entries are invalid we can free the tbl8 group and invalidate
     * the associated tbl24 entry.
     */
    tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);

    if (tbl8_recycle_index == -EINVAL) {
        /* Set tbl24 before freeing the tbl8 group to avoid a race
         * condition, and prevent the free from being hoisted.
         */
        lpm->tbl24[tbl24_index].valid = 0;
        __atomic_thread_fence(__ATOMIC_RELEASE);
        tbl8_free(lpm->tbl8, tbl8_group_start);
    } else if (tbl8_recycle_index > -1) {
        /* Update the tbl24 entry. */
        struct rte_lpm_tbl_entry new_tbl24_entry = {
            .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
            .valid = VALID,
            .valid_group = 0,
            .depth = lpm->tbl8[tbl8_recycle_index].depth,
        };

        /* Set tbl24 before freeing the tbl8 group to avoid a race
         * condition, and prevent the free from being hoisted.
         */
        __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
                __ATOMIC_RELAXED);
        __atomic_thread_fence(__ATOMIC_RELEASE);
        tbl8_free(lpm->tbl8, tbl8_group_start);
    }
#undef group_idx
    return 0;
}
/*
 * Deletes a rule.
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
    uint8_t sub_rule_depth, uint32_t sub_rule_nhop)
{
    int32_t rule_to_delete_index;
    uint32_t ip_masked;
    //uint8_t sub_rule_depth;
    /*
     * Check input arguments. Note: ip is an unsigned 32-bit integer, so
     * every value is acceptable and it need not be checked.
     */
    if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
        return -EINVAL;
    }

    ip_masked = ip & depth_to_mask(depth);

    /*
     * Find the index, in the rule table, of the rule to be deleted.
     */
    rule_to_delete_index = rule_find(lpm, ip_masked, depth);

    /*
     * Check if rule_to_delete_index was found. If no rule was found the
     * function rule_find returns -EINVAL.
     */
    if (rule_to_delete_index < 0)
        return -EINVAL;

    /* Delete the rule from the rule table. */
    rule_delete(lpm, rule_to_delete_index, depth);

    /*
     * The original code looked up a replacement rule for rule_to_delete
     * here (see the commented-out lines); in this variant the replacement
     * depth and next hop are supplied by the caller through
     * sub_rule_depth and sub_rule_nhop.
     */
    //sub_rule_depth = *psub_rule_depth;
    //sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);

    /*
     * If the input depth value is 24 or less use delete_depth_small,
     * otherwise use delete_depth_big.
     */
    if (depth <= MAX_DEPTH_TBL24) {
        return delete_depth_small(lpm, ip_masked, depth,
                sub_rule_nhop, sub_rule_depth);
    } else { /* If depth > MAX_DEPTH_TBL24 */
        return delete_depth_big(lpm, ip_masked, depth, sub_rule_nhop,
                sub_rule_depth);
    }
}
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
    /* Zero rule information. */
    memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

    /* Zero tbl24. */
    memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

    /* Zero tbl8. */
    memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
            * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

    /* Delete all rules from the rules table. */
    memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}