2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <linux/module.h>
29 #include <dev/mlx5/driver.h>
30 #include "mlx5_core.h"
32 #include <linux/string.h>
33 #include <linux/compiler.h>
/* Number of elements in a compound-literal array of init_tree_node.
 * NOTE(review): several continuation lines of the macros below appear to be
 * missing from this listing (e.g. .name/.flags initializers); confirm
 * against the full source before editing. */
35 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
36 sizeof(struct init_tree_node))
/* Build a FS_TYPE_PRIO init_tree_node with its children supplied as
 * variadic initializers; ar_size is derived from the same argument list. */
38 #define ADD_PRIO(name_val, flags_val, min_level_val, max_ft_val, caps_val, \
39 ...) {.type = FS_TYPE_PRIO,\
41 .min_ft_level = min_level_val,\
43 .max_ft = max_ft_val,\
45 .children = (struct init_tree_node[]) {__VA_ARGS__},\
46 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
49 #define ADD_FT_PRIO(name_val, flags_val, max_ft_val, ...)\
50 ADD_PRIO(name_val, flags_val, 0, max_ft_val, {},\
53 #define ADD_NS(name_val, ...) {.type = FS_TYPE_NAMESPACE,\
55 .children = (struct init_tree_node[]) {__VA_ARGS__},\
56 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
59 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
62 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
/* Capability-requirement list attached to a priority: the node is only
 * instantiated when the device reports all listed HCA capability bits. */
64 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
65 .caps = (long[]) {__VA_ARGS__}}
/* Per-priority flow-table limits and minimum ft levels for each priority. */
67 #define BYPASS_MAX_FT 5
68 #define BYPASS_PRIO_MAX_FT 1
69 #define KERNEL_MAX_FT 3
70 #define LEFTOVER_MAX_FT 1
/* Historical misspelling ("KENREL") kept: it is referenced by the tree
 * description below, so renaming would break those users. */
71 #define KENREL_MIN_LEVEL 3
/* Parenthesized: the previous unparenthesized expansions were operator-
 * precedence hazards when the macro is used inside a larger expression
 * (e.g. 2 * LEFTOVER_MIN_LEVEL would expand to 2 * 3 + 1). */
72 #define LEFTOVER_MIN_LEVEL (KENREL_MIN_LEVEL + 1)
73 #define BYPASS_MIN_LEVEL (MLX5_NUM_BYPASS_FTS + LEFTOVER_MIN_LEVEL)
/* One node of the static namespace/priority tree description consumed at
 * init time; children/ar_size describe the sub-tree, caps the HCA
 * capabilities required for the node to be created.
 * NOTE(review): this listing omits several members (type, ar_size, ...). */
79 struct init_tree_node {
82 struct init_tree_node *children;
84 struct node_caps caps;
/* Static description of the default NIC-receive steering tree:
 * bypass priorities (gated on flow_modify_en/modify_root caps),
 * the kernel priority, and a shared leftovers priority.
 * NOTE(review): the declaration line of this initializer and several
 * argument lines are missing from this listing. */
90 .type = FS_TYPE_NAMESPACE,
93 .children = (struct init_tree_node[]) {
94 ADD_PRIO("by_pass_prio", 0, BYPASS_MIN_LEVEL, 0,
95 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
96 FS_CAP(flow_table_properties_nic_receive.modify_root)),
98 ADD_FT_PRIO("prio0", 0,
100 ADD_FT_PRIO("prio1", 0,
102 ADD_FT_PRIO("prio2", 0,
104 ADD_FT_PRIO("prio3", 0,
106 ADD_FT_PRIO("prio4", 0,
108 ADD_FT_PRIO("prio5", 0,
110 ADD_FT_PRIO("prio6", 0,
112 ADD_FT_PRIO("prio7", 0,
114 ADD_FT_PRIO("prio-mcast", 0,
115 BYPASS_PRIO_MAX_FT))),
116 ADD_PRIO("kernel_prio", 0, KENREL_MIN_LEVEL, 0, {},
118 ADD_FT_PRIO("prio_kernel-0", 0,
120 ADD_PRIO("leftovers_prio", MLX5_CORE_FS_PRIO_SHARED,
121 LEFTOVER_MIN_LEVEL, 0,
122 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
123 FS_CAP(flow_table_properties_nic_receive.modify_root)),
124 ADD_NS("leftover_ns",
125 ADD_FT_PRIO("leftovers_prio-0",
126 MLX5_CORE_FS_PRIO_SHARED,
131 /* Tree creation functions */
/* Walk up the parent chain from @node to the topmost fs_base and return
 * the mlx5_flow_root_namespace that embeds it.
 * NOTE(review): lines missing from this listing; the original presumably
 * returns NULL when the topmost node is not a namespace -- confirm. */
133 static struct mlx5_flow_root_namespace *find_root(struct fs_base *node)
135 struct fs_base *parent;
137 /* Make sure we only read it once while we go up the tree */
138 while ((parent = node->parent))
141 if (node->type != FS_TYPE_NAMESPACE) {
145 return container_of(container_of(node,
146 struct mlx5_flow_namespace,
148 struct mlx5_flow_root_namespace,
/* Resolve the mlx5_core_dev that owns @node via its root namespace. */
152 static inline struct mlx5_core_dev *fs_get_dev(struct fs_base *node)
154 struct mlx5_flow_root_namespace *root = find_root(node);
/* Initialize the common fs_base bookkeeping: kref + user refcount
 * (@refcount initial users), removal completion, sibling list and lock. */
161 static void fs_init_node(struct fs_base *node,
162 unsigned int refcount)
164 kref_init(&node->refcount);
165 atomic_set(&node->users_refcount, refcount);
166 init_completion(&node->complete);
167 INIT_LIST_HEAD(&node->list);
168 mutex_init(&node->lock);
/* Attach an already-initialized node under @parent: pin the parent with a
 * user reference, duplicate @name, and record the parent pointer. */
171 static void _fs_add_node(struct fs_base *node,
173 struct fs_base *parent)
176 atomic_inc(&parent->users_refcount);
177 node->name = kstrdup_const(name, GFP_KERNEL);
178 node->parent = parent;
/* Convenience wrapper: initialize a node and attach it under @parent. */
181 static void fs_add_node(struct fs_base *node,
182 struct fs_base *parent, const char *name,
183 unsigned int refcount)
185 fs_init_node(node, refcount);
186 _fs_add_node(node, name, parent);
189 static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
192 static void fs_del_dst(struct mlx5_flow_rule *dst);
193 static void _fs_del_ft(struct mlx5_flow_table *ft);
194 static void fs_del_fg(struct mlx5_flow_group *fg);
195 static void fs_del_fte(struct fs_fte *fte);
/* Type-dispatched teardown of the underlying firmware object for @base.
 * NOTE(review): the break statements / default case are missing from this
 * listing -- the original has a break per case; confirm before editing. */
197 static void cmd_remove_node(struct fs_base *base)
199 switch (base->type) {
200 case FS_TYPE_FLOW_DEST:
201 fs_del_dst(container_of(base, struct mlx5_flow_rule, base));
203 case FS_TYPE_FLOW_TABLE:
204 _fs_del_ft(container_of(base, struct mlx5_flow_table, base));
206 case FS_TYPE_FLOW_GROUP:
207 fs_del_fg(container_of(base, struct mlx5_flow_group, base));
209 case FS_TYPE_FLOW_ENTRY:
210 fs_del_fte(container_of(base, struct fs_fte, base));
/* kref release callback: under the parent's and the node's locks, destroy
 * the firmware object, signal waiters, then drop the user reference this
 * node held on its parent (parent lock already held for that put). */
218 static void __fs_remove_node(struct kref *kref)
219 struct fs_base *node = container_of(kref, struct fs_base, refcount);
222 mutex_lock(&node->parent->lock);
223 mutex_lock(&node->lock);
224 cmd_remove_node(node);
225 mutex_unlock(&node->lock);
226 complete(&node->complete);
228 mutex_unlock(&node->parent->lock);
229 _fs_put(node->parent, _fs_remove_node, false);
/* Release callback used for internal puts: full removal plus freeing the
 * const-duplicated name (external removers free the name themselves). */
233 void _fs_remove_node(struct kref *kref)
235 struct fs_base *node = container_of(kref, struct fs_base, refcount);
237 __fs_remove_node(kref);
238 kfree_const(node->name);
/* Take a user reference on @node (paired with fs_put()). */
242 static void fs_get(struct fs_base *node)
244 atomic_inc(&node->users_refcount);
/* Drop a user reference; on the last one, unlink the node from its
 * parent's list (under the parent lock unless the caller already holds
 * it) and put the kref with @kref_cb as release function.
 * NOTE(review): lines are missing around the unlock at old line 258; in
 * the original it is guarded by parent_node/!parent_locked -- confirm. */
247 static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
250 struct fs_base *parent_node = node->parent;
252 if (parent_node && !parent_locked)
253 mutex_lock(&parent_node->lock);
254 if (atomic_dec_and_test(&node->users_refcount)) {
256 /*remove from parent's list*/
257 list_del_init(&node->list);
258 mutex_unlock(&parent_node->lock);
260 kref_put(&node->refcount, kref_cb);
261 if (parent_node && parent_locked)
262 mutex_lock(&parent_node->lock);
263 } else if (parent_node && !parent_locked) {
264 mutex_unlock(&parent_node->lock);
/* Drop a user reference; parent lock not held by the caller. */
268 static void fs_put(struct fs_base *node)
270 _fs_put(node, __fs_remove_node, false);
/* Drop a user reference while the caller already holds the parent lock. */
273 static void fs_put_parent_locked(struct fs_base *node)
275 _fs_put(node, __fs_remove_node, true);
/* Synchronous removal: wait for the last reference to be dropped (the
 * completion is signalled from __fs_remove_node), then free the name.
 * NOTE(review): the initiating fs_put line is missing from this listing. */
278 static void fs_remove_node(struct fs_base *node)
281 wait_for_completion(&node->complete);
282 kfree_const(node->name);
/* Same as fs_remove_node() but for callers holding the parent's lock. */
286 static void fs_remove_node_parent_locked(struct fs_base *node)
288 fs_put_parent_locked(node);
289 wait_for_completion(&node->complete);
290 kfree_const(node->name);
/* Allocate and fill a software flow-table entry (fte): copies the match
 * value, records action/flow_tag and initializes the destination list.
 * Returns ERR_PTR(-ENOMEM) on allocation failure. */
294 static struct fs_fte *fs_alloc_fte(u8 action,
302 fte = kzalloc(sizeof(*fte), GFP_KERNEL);
304 return ERR_PTR(-ENOMEM);
306 memcpy(fte->val, match_value, sizeof(fte->val));
307 fte->base.type = FS_TYPE_FLOW_ENTRY;
309 fte->flow_tag = flow_tag;
311 INIT_LIST_HEAD(&fte->dests);
312 fte->action = action;
/* Allocate the hidden "star rule" fte inside group @fg of table @ft:
 * a forward-to-flow-table catch-all entry used to chain tables together.
 * Fails with -ENOSPC when the group is full. */
317 static struct fs_fte *alloc_star_ft_entry(struct mlx5_flow_table *ft,
318 struct mlx5_flow_group *fg,
324 struct mlx5_flow_rule *dst;
326 if (fg->num_ftes == fg->max_ftes)
327 return ERR_PTR(-ENOSPC);
329 fte = fs_alloc_fte(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
330 MLX5_FS_DEFAULT_FLOW_TAG, match_value, index);
335 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
341 fte->base.parent = &fg->base;
343 dst->dest_attr.type = MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE;
344 dst->base.parent = &fte->base;
345 list_add(&dst->base.list, &fte->dests);
346 /* assumed that the callee creates the star rules sorted by index */
347 list_add_tail(&fte->base.list, &fg->ftes);
357 /* assume that fte can't be changed */
/* Tear down a star fte: free each queued destination, unlink the fte
 * from its group and release the fte itself (free lines truncated here). */
358 static void free_star_fte_entry(struct fs_fte *fte)
360 struct mlx5_flow_group *fg;
361 struct mlx5_flow_rule *dst, *temp;
363 fs_get_parent(fg, fte);
365 list_for_each_entry_safe(dst, temp, &fte->dests, base.list) {
367 list_del(&dst->base.list);
371 list_del(&fte->base.list);
/* Allocate a software flow group from a create_flow_group_in command
 * buffer: copies the match criteria/enable mask and derives start index
 * and capacity (end - start + 1) from the command layout. */
376 static struct mlx5_flow_group *fs_alloc_fg(u32 *create_fg_in)
378 struct mlx5_flow_group *fg;
379 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
380 create_fg_in, match_criteria);
381 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
383 match_criteria_enable);
384 fg = kzalloc(sizeof(*fg), GFP_KERNEL);
386 return ERR_PTR(-ENOMEM);
388 INIT_LIST_HEAD(&fg->ftes);
389 fg->mask.match_criteria_enable = match_criteria_enable;
390 memcpy(&fg->mask.match_criteria, match_criteria,
391 sizeof(fg->mask.match_criteria));
392 fg->base.type = FS_TYPE_FLOW_GROUP;
393 fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
395 fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
396 end_flow_index) - fg->start_index + 1;
400 static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio);
401 static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
402 struct fs_prio *prio);
404 /* assumed src_ft and dst_ft can't be freed */
/* Point src_ft's star rule at dst_ft: rewrites the fte's destination via
 * a set_fte firmware command when dst_ft != NULL, otherwise deletes the
 * fte in firmware. Takes a reference on dst_ft on success. */
405 static int fs_set_star_rule(struct mlx5_core_dev *dev,
406 struct mlx5_flow_table *src_ft,
407 struct mlx5_flow_table *dst_ft)
409 struct mlx5_flow_rule *src_dst;
410 struct fs_fte *src_fte;
413 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
415 src_dst = list_first_entry(&src_ft->star_rule.fte->dests,
416 struct mlx5_flow_rule, base.list);
417 match_value = mlx5_vzalloc(match_len);
419 mlx5_core_warn(dev, "failed to allocate inbox\n");
422 /*Create match context*/
424 fs_get_parent(src_fte, src_dst);
426 src_dst->dest_attr.ft = dst_ft;
428 err = mlx5_cmd_fs_set_fte(dev,
431 match_value, src_ft->type,
432 src_ft->id, src_fte->index,
433 src_ft->star_rule.fg->id,
441 fs_get(&dst_ft->base);
443 mlx5_cmd_fs_delete_fte(dev,
446 src_ft->type, src_ft->id,
/* Re-point the star rule of every table in @prev_prio at @next_ft so the
 * chain stays connected, dropping the reference each star rule held on
 * its previous target (parent-locked put when that target's prio is
 * @locked_prio, which the caller already holds). */
455 static int connect_prev_fts(struct fs_prio *locked_prio,
456 struct fs_prio *prev_prio,
457 struct mlx5_flow_table *next_ft)
459 struct mlx5_flow_table *iter;
461 struct mlx5_core_dev *dev = fs_get_dev(&prev_prio->base);
466 mutex_lock(&prev_prio->base.lock);
467 fs_for_each_ft(iter, prev_prio) {
468 struct mlx5_flow_rule *src_dst =
469 list_first_entry(&iter->star_rule.fte->dests,
470 struct mlx5_flow_rule, base.list);
471 struct mlx5_flow_table *prev_ft = src_dst->dest_attr.ft;
473 if (prev_ft == next_ft)
476 err = fs_set_star_rule(dev, iter, next_ft);
479 "mlx5: flow steering can't connect prev and next\n");
482 /* Assume ft's prio is locked */
484 struct fs_prio *prio;
486 fs_get_parent(prio, prev_ft);
487 if (prio == locked_prio)
488 fs_put_parent_locked(&prev_ft->base);
490 fs_put(&prev_ft->base);
496 mutex_unlock(&prev_prio->base.lock);
/* Install the hidden star rule of a new table @ft: create the one-entry
 * group at index max_fte, allocate its fte, then (under the root chain
 * lock) point it at the next table in the chain and re-point the previous
 * tables at @ft. Unwinds in reverse order on failure. */
500 static int create_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
502 struct mlx5_flow_group *fg;
506 struct mlx5_flow_table *next_ft;
507 struct mlx5_flow_table *prev_ft;
508 struct mlx5_flow_root_namespace *root = find_root(&prio->base);
509 int fg_inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
510 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
512 fg_in = mlx5_vzalloc(fg_inlen);
514 mlx5_core_warn(root->dev, "failed to allocate inbox\n");
518 match_value = mlx5_vzalloc(match_len);
520 mlx5_core_warn(root->dev, "failed to allocate inbox\n");
/* Star group occupies exactly one entry, placed just past max_fte. */
525 MLX5_SET(create_flow_group_in, fg_in, start_flow_index, ft->max_fte);
526 MLX5_SET(create_flow_group_in, fg_in, end_flow_index, ft->max_fte);
527 fg = fs_alloc_fg(fg_in);
532 ft->star_rule.fg = fg;
533 err = mlx5_cmd_fs_create_fg(fs_get_dev(&prio->base),
534 fg_in, ft->vport, ft->type,
540 ft->star_rule.fte = alloc_star_ft_entry(ft, fg,
543 if (IS_ERR(ft->star_rule.fte))
546 mutex_lock(&root->fs_chain_lock);
547 next_ft = find_next_ft(prio);
548 err = fs_set_star_rule(root->dev, ft, next_ft);
550 mutex_unlock(&root->fs_chain_lock);
554 struct fs_prio *parent;
556 fs_get_parent(parent, next_ft);
557 fs_put(&next_ft->base);
559 prev_ft = find_prev_ft(ft, prio);
561 struct fs_prio *prev_parent;
563 fs_get_parent(prev_parent, prev_ft);
565 err = connect_prev_fts(NULL, prev_parent, ft);
567 mutex_unlock(&root->fs_chain_lock);
568 goto destroy_chained_star_rule;
570 fs_put(&prev_ft->base);
572 mutex_unlock(&root->fs_chain_lock);
/* Error unwind: detach the star rule, free its fte and destroy the fg. */
578 destroy_chained_star_rule:
579 fs_set_star_rule(fs_get_dev(&prio->base), ft, NULL);
581 fs_put(&next_ft->base);
583 free_star_fte_entry(ft->star_rule.fte);
584 mlx5_cmd_fs_destroy_fg(fs_get_dev(&ft->base), ft->vport,
/* Inverse of create_star_rule(): under the chain lock, first re-point the
 * previous tables at the next table (atomicity of the chain), then delete
 * @ft's star fte, destroy its group, and drop the chain references. */
595 static void destroy_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
598 struct mlx5_flow_root_namespace *root;
599 struct mlx5_core_dev *dev = fs_get_dev(&prio->base);
600 struct mlx5_flow_table *prev_ft, *next_ft;
601 struct fs_prio *prev_prio;
605 root = find_root(&prio->base);
608 "flow steering failed to find root of priority %s",
611 /* In order to ensure atomic deletion, first update
612 * prev ft to point on the next ft.
614 mutex_lock(&root->fs_chain_lock);
615 prev_ft = find_prev_ft(ft, prio);
616 next_ft = find_next_ft(prio);
618 fs_get_parent(prev_prio, prev_ft);
619 /*Prev is connected to ft, only if ft is the first(last) in the prio*/
620 err = connect_prev_fts(prio, prev_prio, next_ft);
622 mlx5_core_warn(root->dev,
623 "flow steering can't connect prev and next of flow table\n");
624 fs_put(&prev_ft->base);
627 err = fs_set_star_rule(root->dev, ft, NULL);
628 /*One put is for fs_get in find next ft*/
630 fs_put(&next_ft->base);
632 fs_put(&next_ft->base);
635 mutex_unlock(&root->fs_chain_lock);
636 err = mlx5_cmd_fs_destroy_fg(dev, ft->vport, ft->type, ft->id,
637 ft->star_rule.fg->id);
640 "flow steering can't destroy star entry group(index:%d) of ft:%s\n", ft->star_rule.fg->start_index,
642 free_star_fte_entry(ft->star_rule.fte);
644 kfree(ft->star_rule.fg);
645 ft->star_rule.fg = NULL;
/* Linear search for the fs_prio with number @prio inside namespace @ns.
 * NOTE(review): the return lines are missing from this listing. */
648 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
651 struct fs_prio *iter_prio;
653 fs_for_each_prio(iter_prio, ns) {
654 if (iter_prio->prio == prio)
661 static unsigned int _alloc_new_level(struct fs_prio *prio,
662 struct mlx5_flow_namespace *match);
/* Sum the levels contributed by priorities of @ns that precede @prio,
 * then recurse upward through @ns's parent priority (mutual recursion
 * with _alloc_new_level) to accumulate the absolute level. */
664 static unsigned int __alloc_new_level(struct mlx5_flow_namespace *ns,
665 struct fs_prio *prio)
667 unsigned int level = 0;
673 mutex_lock(&ns->base.lock);
674 fs_for_each_prio(p, ns) {
680 mutex_unlock(&ns->base.lock);
682 fs_get_parent(prio, ns);
684 WARN_ON(prio->base.type != FS_TYPE_PRIO);
686 return level + _alloc_new_level(prio, ns);
689 /* Called under lock of priority, hence locking all upper objects */
/* Scan @prio's children in reverse (skipping the sub-tree rooted at
 * @match): the first flow table found fixes the base level (ft->level+1);
 * otherwise continue upward via __alloc_new_level(). */
690 static unsigned int _alloc_new_level(struct fs_prio *prio,
691 struct mlx5_flow_namespace *match)
693 struct mlx5_flow_namespace *ns;
695 unsigned int level = 0;
700 mutex_lock(&prio->base.lock);
701 fs_for_each_ns_or_ft_reverse(it, prio) {
702 if (it->type == FS_TYPE_NAMESPACE) {
708 mutex_lock(&ns->base.lock);
709 fs_for_each_prio(p, ns)
711 mutex_unlock(&ns->base.lock);
716 struct mlx5_flow_table *ft;
719 mutex_unlock(&prio->base.lock);
720 return level + ft->level + 1;
724 fs_get_parent(ns, prio);
725 mutex_unlock(&prio->base.lock);
726 return __alloc_new_level(ns, prio) + level;
/* Compute the level for a new flow table created in @prio. */
729 static unsigned int alloc_new_level(struct fs_prio *prio)
731 return _alloc_new_level(prio, NULL);
/* On table creation: if @ft sits at a lower level than the current root
 * flow table, tell firmware to make @ft the new root for its type. */
734 static int update_root_ft_create(struct mlx5_flow_root_namespace *root,
735 struct mlx5_flow_table *ft)
738 int min_level = INT_MAX;
741 min_level = root->root_ft->level;
743 if (ft->level < min_level)
744 err = mlx5_cmd_update_root_ft(root->dev, ft->type,
750 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
/* Core flow-table creation: allocate the software object, pick its level,
 * create the firmware table (size rounded up with 2 hidden star-rule
 * entries), install the star rule, optionally promote to root FT, name
 * it and link it into @fs_prio. */
758 static struct mlx5_flow_table *_create_ft_common(struct mlx5_flow_namespace *ns,
760 struct fs_prio *fs_prio,
764 struct mlx5_flow_table *ft;
769 struct mlx5_flow_root_namespace *root = find_root(&ns->base);
770 struct mlx5_core_dev *dev = fs_get_dev(&ns->base);
774 "flow steering failed to find root of namespace %s",
776 return ERR_PTR(-ENODEV);
779 if (fs_prio->num_ft == fs_prio->max_ft)
780 return ERR_PTR(-ENOSPC);
782 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
784 return ERR_PTR(-ENOMEM);
786 fs_init_node(&ft->base, 1);
787 INIT_LIST_HEAD(&ft->fgs);
789 /* Temporarily WA until we expose the level set in the API */
790 if (root->table_type == FS_FT_ESW_EGRESS_ACL ||
791 root->table_type == FS_FT_ESW_INGRESS_ACL)
794 ft->level = alloc_new_level(fs_prio);
796 ft->base.type = FS_TYPE_FLOW_TABLE;
798 ft->type = root->table_type;
799 /*Two entries are reserved for star rules*/
800 ft_size = roundup_pow_of_two(max_fte + 2);
801 /*User isn't aware to those rules*/
802 ft->max_fte = ft_size - 2;
803 log_table_sz = ilog2(ft_size);
804 err = mlx5_cmd_fs_create_ft(root->dev, ft->vport, ft->type,
805 ft->level, log_table_sz, &ft->id);
809 err = create_star_rule(ft, fs_prio);
/* Only NIC RX tables with the modify_root capability may become root. */
813 if ((root->table_type == FS_FT_NIC_RX) && MLX5_CAP_FLOWTABLE(root->dev,
814 flow_table_properties_nic_receive.modify_root)) {
815 err = update_root_ft_create(root, ft);
817 goto destroy_star_rule;
820 if (!name || !strlen(name)) {
821 snprintf(gen_name, 20, "flow_table_%u", ft->id);
822 _fs_add_node(&ft->base, gen_name, &fs_prio->base);
824 _fs_add_node(&ft->base, name, &fs_prio->base);
826 list_add_tail(&ft->base.list, &fs_prio->objs);
832 destroy_star_rule(ft, fs_prio);
834 mlx5_cmd_fs_destroy_ft(root->dev, ft->vport, ft->type, ft->id);
/* Resolve priority number @prio within @ns, then delegate to
 * _create_ft_common(); -EINVAL when the priority does not exist. */
840 static struct mlx5_flow_table *create_ft_common(struct mlx5_flow_namespace *ns,
846 struct fs_prio *fs_prio = NULL;
847 fs_prio = find_prio(ns, prio);
849 return ERR_PTR(-EINVAL);
851 return _create_ft_common(ns, vport, fs_prio, max_fte, name);
855 static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
856 struct list_head *start);
858 static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
859 struct list_head *start);
/* In a shared priority, reuse the first existing table (bumping its share
 * count) instead of creating a new one. */
861 static struct mlx5_flow_table *mlx5_create_autogrouped_shared_flow_table(struct fs_prio *fs_prio)
863 struct mlx5_flow_table *ft;
865 ft = find_first_ft_in_prio(fs_prio, &fs_prio->objs);
867 ft->shared_refcount++;
/* Public API: create (or, for shared priorities, reuse) an auto-grouped
 * flow table. Groups are created lazily on rule insertion, bounded by
 * @max_num_groups. Shared priorities are serialized by shared_lock. */
874 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
877 int num_flow_table_entries,
880 struct mlx5_flow_table *ft = NULL;
881 struct fs_prio *fs_prio;
884 fs_prio = find_prio(ns, prio);
886 return ERR_PTR(-EINVAL);
888 is_shared_prio = fs_prio->flags & MLX5_CORE_FS_PRIO_SHARED;
889 if (is_shared_prio) {
890 mutex_lock(&fs_prio->shared_lock);
891 ft = mlx5_create_autogrouped_shared_flow_table(fs_prio);
897 ft = create_ft_common(ns, 0, prio, num_flow_table_entries,
902 ft->autogroup.active = true;
903 ft->autogroup.max_types = max_num_groups;
905 ft->shared_refcount = 1;
909 mutex_unlock(&fs_prio->shared_lock);
912 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
/* Public API: create a flow table on behalf of a specific vport. */
914 struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
918 int num_flow_table_entries)
920 return create_ft_common(ns, vport, prio, num_flow_table_entries, name);
922 EXPORT_SYMBOL(mlx5_create_vport_flow_table);
/* Public API: create a flow table on the local vport (vport 0). */
924 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
927 int num_flow_table_entries)
929 return create_ft_common(ns, 0, prio, num_flow_table_entries, name);
931 EXPORT_SYMBOL(mlx5_create_flow_table);
/* Teardown hook invoked from cmd_remove_node(): destroy the firmware
 * table and update the owning priority's accounting. */
933 static void _fs_del_ft(struct mlx5_flow_table *ft)
936 struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
937 struct fs_prio *prio;
939 err = mlx5_cmd_fs_destroy_ft(dev, ft->vport, ft->type, ft->id);
941 mlx5_core_warn(dev, "flow steering can't destroy ft %s\n",
944 fs_get_parent(prio, ft);
/* If @ft is the current root table, pick its successor (next table in
 * the same prio, else the first table of a following prio) and make it
 * the firmware root before @ft is destroyed. */
948 static int update_root_ft_destroy(struct mlx5_flow_root_namespace *root,
949 struct mlx5_flow_table *ft)
952 struct fs_prio *prio;
953 struct mlx5_flow_table *next_ft = NULL;
954 struct mlx5_flow_table *put_ft = NULL;
956 if (root->root_ft != ft)
959 fs_get_parent(prio, ft);
960 /*Assuming objs containis only flow tables and
961 * flow tables are sorted by level.
963 if (!list_is_last(&ft->base.list, &prio->objs)) {
964 next_ft = list_next_entry(ft, base.list);
966 next_ft = find_next_ft(prio);
971 err = mlx5_cmd_update_root_ft(root->dev, next_ft->type,
974 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
978 root->root_ft = next_ft;
981 fs_put(&put_ft->base);
986 /*Objects in the same prio are destroyed in the reverse order they were createrd*/
/* Public API: destroy a flow table. Shared-prio tables are refcounted
 * and only torn down on the last user; otherwise demote from root if
 * needed, remove the star rule, and remove the node under the prio lock. */
987 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
990 struct fs_prio *prio;
991 struct mlx5_flow_root_namespace *root;
993 struct mlx5_core_dev *dev;
995 fs_get_parent(prio, ft);
996 root = find_root(&prio->base);
997 dev = fs_get_dev(&prio->base);
1001 "flow steering failed to find root of priority %s",
1006 is_shared_prio = prio->flags & MLX5_CORE_FS_PRIO_SHARED;
1007 if (is_shared_prio) {
1008 mutex_lock(&prio->shared_lock);
1009 if (ft->shared_refcount > 1) {
1010 --ft->shared_refcount;
1012 mutex_unlock(&prio->shared_lock);
1017 mutex_lock(&prio->base.lock);
1018 mutex_lock(&ft->base.lock);
1020 err = update_root_ft_destroy(root, ft);
1024 /* delete two last entries */
1025 destroy_star_rule(ft, prio);
1027 mutex_unlock(&ft->base.lock);
1028 fs_remove_node_parent_locked(&ft->base);
1029 mutex_unlock(&prio->base.lock);
1031 mutex_unlock(&prio->shared_lock);
/* Error path: release the locks in reverse acquisition order. */
1036 mutex_unlock(&ft->base.lock);
1037 mutex_unlock(&prio->base.lock);
1039 mutex_unlock(&prio->shared_lock);
/* Create a flow group in firmware and link the software object into
 * @ft's group list after @prev (list kept sorted by start_index);
 * @refcount is the initial user count (0 for autogroups). */
1045 static struct mlx5_flow_group *fs_create_fg(struct mlx5_core_dev *dev,
1046 struct mlx5_flow_table *ft,
1047 struct list_head *prev,
1051 struct mlx5_flow_group *fg;
1053 unsigned int end_index;
1056 fg = fs_alloc_fg(fg_in);
1060 end_index = fg->start_index + fg->max_ftes - 1;
1061 err = mlx5_cmd_fs_create_fg(dev, fg_in,
1062 ft->vport, ft->type, ft->id,
1067 mutex_lock(&ft->base.lock);
1068 if (ft->autogroup.active)
1069 ft->autogroup.num_types++;
1071 snprintf(name, sizeof(name), "group_%u", fg->id);
1072 /*Add node to tree*/
1073 fs_add_node(&fg->base, &ft->base, name, refcount);
1074 /*Add node to group list*/
1075 list_add(&fg->base.list, prev);
1076 mutex_unlock(&ft->base.lock);
1082 return ERR_PTR(err);
/* Public API: create an explicit flow group; refused (-EPERM) on tables
 * that manage their groups automatically. New group is appended last. */
1085 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1088 struct mlx5_flow_group *fg;
1089 struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
1092 return ERR_PTR(-ENODEV);
1094 if (ft->autogroup.active)
1095 return ERR_PTR(-EPERM);
1097 fg = fs_create_fg(dev, ft, ft->fgs.prev, in, 1);
1101 EXPORT_SYMBOL(mlx5_create_flow_group);
1103 /*Group is destoyed when all the rules in the group were removed*/
/* Teardown hook for a flow group: adjust autogroup accounting on the
 * parent table and destroy the group in firmware. */
1104 static void fs_del_fg(struct mlx5_flow_group *fg)
1106 struct mlx5_flow_table *parent_ft;
1107 struct mlx5_core_dev *dev;
1109 fs_get_parent(parent_ft, fg);
1110 dev = fs_get_dev(&parent_ft->base);
1113 if (parent_ft->autogroup.active)
1114 parent_ft->autogroup.num_types--;
1116 if (mlx5_cmd_fs_destroy_fg(dev, parent_ft->vport,
1118 parent_ft->id, fg->id))
1119 mlx5_core_warn(dev, "flow steering can't destroy fg\n");
/* Public API: synchronously remove a flow group node. */
1122 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
1124 fs_remove_node(&fg->base);
1126 EXPORT_SYMBOL(mlx5_destroy_flow_group);
/* Byte-wise masked comparison: true iff val1 and val2 agree on every bit
 * set in mask over @size bytes (return lines truncated in this listing). */
1128 static bool _fs_match_exact_val(void *mask, void *val1, void *val2, size_t size)
1132 /* TODO: optimize by comparing 64bits when possible */
1133 for (i = 0; i < size; i++, mask++, val1++, val2++)
1134 if ((*((u8 *)val1) & (*(u8 *)mask)) !=
1135 ((*(u8 *)val2) & (*(u8 *)mask)))
/* Compare two fte_match_param values under @mask, section by section:
 * only the criteria groups enabled in match_criteria_enable (outer
 * headers, misc parameters, inner headers) are compared. */
1141 bool fs_match_exact_val(struct mlx5_core_fs_mask *mask,
1142 void *val1, void *val2)
1144 if (mask->match_criteria_enable &
1145 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
1146 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
1147 val1, outer_headers);
1148 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
1149 val2, outer_headers);
1150 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
1151 mask->match_criteria, outer_headers);
1153 if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
1154 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
1158 if (mask->match_criteria_enable &
1159 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
1160 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
1161 val1, misc_parameters);
1162 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
1163 val2, misc_parameters);
1164 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
1165 mask->match_criteria, misc_parameters);
1167 if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
1168 MLX5_ST_SZ_BYTES(fte_match_set_misc)))
1171 if (mask->match_criteria_enable &
1172 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
1173 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
1174 val1, inner_headers);
1175 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
1176 val2, inner_headers);
1177 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
1178 mask->match_criteria, inner_headers);
1180 if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
1181 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
/* True iff two masks are identical: same enabled criteria groups and a
 * byte-identical fte_match_param criteria blob. */
1187 bool fs_match_exact_mask(u8 match_criteria_enable1,
1188 u8 match_criteria_enable2,
1189 void *mask1, void *mask2)
1191 return match_criteria_enable1 == match_criteria_enable2 &&
1192 !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
1195 static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
1196 struct list_head *start);
/* Walk @prio's children backwards starting after @start: return the first
 * flow table found directly or inside a nested namespace (searched from
 * its prios list). Caller holds the prio lock. */
1198 static struct mlx5_flow_table *_find_first_ft_in_prio_reverse(struct fs_prio *prio,
1199 struct list_head *start)
1201 struct fs_base *it = container_of(start, struct fs_base, list);
1206 fs_for_each_ns_or_ft_continue_reverse(it, prio) {
1207 struct mlx5_flow_namespace *ns;
1208 struct mlx5_flow_table *ft;
1210 if (it->type == FS_TYPE_FLOW_TABLE) {
1217 WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);
1219 ft = find_first_ft_in_ns_reverse(ns, &ns->prios);
/* Locked wrapper around _find_first_ft_in_prio_reverse(). */
1228 static struct mlx5_flow_table *find_first_ft_in_prio_reverse(struct fs_prio *prio,
1228 struct list_head *start)
1230 struct mlx5_flow_table *ft;
1235 mutex_lock(&prio->base.lock);
1236 ft = _find_first_ft_in_prio_reverse(prio, start);
1237 mutex_unlock(&prio->base.lock);
/* Continue backwards over @ns's priorities from @start, returning the
 * first flow table found in any of them (namespace lock held inside). */
1242 static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
1243 struct list_head *start)
1245 struct fs_prio *prio;
1250 fs_get_obj(prio, container_of(start, struct fs_base, list));
1251 mutex_lock(&ns->base.lock);
1252 fs_for_each_prio_continue_reverse(prio, ns) {
1253 struct mlx5_flow_table *ft;
1255 ft = find_first_ft_in_prio_reverse(prio, &prio->objs);
1257 mutex_unlock(&ns->base.lock);
1261 mutex_unlock(&ns->base.lock);
1266 /* Returned a held ft, assumed curr is protected, assumed curr's parent is
/* Find the flow table that precedes @curr in the global chain: first look
 * earlier within @prio, then climb namespaces/prios upward until a table
 * is found or the root is reached. Returned table is referenced. */
1269 static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
1270 struct fs_prio *prio)
1272 struct mlx5_flow_table *ft = NULL;
1273 struct fs_base *curr_base;
1278 /* prio has either namespace or flow-tables, but not both */
1279 if (!list_empty(&prio->objs) &&
1280 list_first_entry(&prio->objs, struct mlx5_flow_table, base.list) !=
1284 while (!ft && prio) {
1285 struct mlx5_flow_namespace *ns;
1287 fs_get_parent(ns, prio);
1288 ft = find_first_ft_in_ns_reverse(ns, &prio->base.list);
1289 curr_base = &ns->base;
1290 fs_get_parent(prio, ns);
1293 ft = find_first_ft_in_prio_reverse(prio,
/* Forward counterpart of _find_first_ft_in_prio_reverse(): scan @prio's
 * children after @start for the first flow table, descending into nested
 * namespaces. Caller holds the prio lock. */
1299 static struct mlx5_flow_table *_find_first_ft_in_prio(struct fs_prio *prio,
1300 struct list_head *start)
1302 struct fs_base *it = container_of(start, struct fs_base, list);
1307 fs_for_each_ns_or_ft_continue(it, prio) {
1308 struct mlx5_flow_namespace *ns;
1309 struct mlx5_flow_table *ft;
1311 if (it->type == FS_TYPE_FLOW_TABLE) {
1318 WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);
1320 ft = find_first_ft_in_ns(ns, &ns->prios);
/* Locked wrapper around _find_first_ft_in_prio(). */
1328 static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
1329 struct list_head *start)
1331 struct mlx5_flow_table *ft;
1336 mutex_lock(&prio->base.lock);
1337 ft = _find_first_ft_in_prio(prio, start);
1338 mutex_unlock(&prio->base.lock);
/* Continue forward over @ns's priorities from @start, returning the first
 * flow table found in any of them (namespace lock held inside). */
1343 static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
1344 struct list_head *start)
1346 struct fs_prio *prio;
1351 fs_get_obj(prio, container_of(start, struct fs_base, list));
1352 mutex_lock(&ns->base.lock);
1353 fs_for_each_prio_continue(prio, ns) {
1354 struct mlx5_flow_table *ft;
1356 ft = find_first_ft_in_prio(prio, &prio->objs);
1358 mutex_unlock(&ns->base.lock);
1362 mutex_unlock(&ns->base.lock);
1367 /* returned a held ft, assumed curr is protected, assumed curr's parent is
/* Find the flow table that follows @prio in the global chain by climbing
 * namespaces/prios forward; the returned table is referenced. */
1370 static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio)
1372 struct mlx5_flow_table *ft = NULL;
1373 struct fs_base *curr_base;
1375 while (!ft && prio) {
1376 struct mlx5_flow_namespace *ns;
1378 fs_get_parent(ns, prio);
1379 ft = find_first_ft_in_ns(ns, &prio->base.list);
1380 curr_base = &ns->base;
1381 fs_get_parent(prio, ns);
1384 ft = _find_first_ft_in_prio(prio, &curr_base->list);
1390 /* called under ft mutex lock */
/* Autogroup allocation: size a new group as a fraction of the table,
 * find the first index gap in the (start_index-sorted) group list that
 * fits it, and create the group there with the given match criteria. */
1391 static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
1392 u8 match_criteria_enable,
1393 u32 *match_criteria)
1395 unsigned int group_size;
1396 unsigned int candidate_index = 0;
1397 unsigned int candidate_group_num = 0;
1398 struct mlx5_flow_group *g;
1399 struct mlx5_flow_group *ret;
1400 struct list_head *prev = &ft->fgs;
1401 struct mlx5_core_dev *dev;
1403 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1404 void *match_criteria_addr;
1406 if (!ft->autogroup.active)
1407 return ERR_PTR(-ENOENT);
1409 dev = fs_get_dev(&ft->base);
1411 return ERR_PTR(-ENODEV);
1413 in = mlx5_vzalloc(inlen);
1415 mlx5_core_warn(dev, "failed to allocate inbox\n");
1416 return ERR_PTR(-ENOMEM);
/* +1 reserves headroom for one extra group beyond max_types. */
1420 if (ft->autogroup.num_types < ft->autogroup.max_types)
1421 group_size = ft->max_fte / (ft->autogroup.max_types + 1);
1425 if (group_size == 0) {
1427 "flow steering can't create group size of 0\n");
1428 ret = ERR_PTR(-EINVAL);
1432 /* sorted by start_index */
1433 fs_for_each_fg(g, ft) {
1434 candidate_group_num++;
1435 if (candidate_index + group_size > g->start_index)
1436 candidate_index = g->start_index + g->max_ftes;
1439 prev = &g->base.list;
1442 if (candidate_index + group_size > ft->max_fte) {
1443 ret = ERR_PTR(-ENOSPC);
1447 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1448 match_criteria_enable);
1449 MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
1450 MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
1452 match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1453 in, match_criteria);
1454 memcpy(match_criteria_addr, match_criteria,
1455 MLX5_ST_SZ_BYTES(fte_match_param));
1457 ret = fs_create_fg(dev, ft, prev, in, 0);
/* Climb from @node to the nearest enclosing namespace that has registered
 * rule notifiers; NULL when none exists up to the root. */
1463 static struct mlx5_flow_namespace *get_ns_with_notifiers(struct fs_base *node)
1465 struct mlx5_flow_namespace *ns = NULL;
1467 while (node && (node->type != FS_TYPE_NAMESPACE ||
1468 list_empty(&container_of(node, struct
1469 mlx5_flow_namespace,
1470 base)->list_notifiers)))
1471 node = node->parent;
1474 fs_get_obj(ns, node);
1480 /*Assumption- fte is locked*/
/* Notify every registered handler of the nearest notifier namespace that
 * destination @dst was added; per-handler private data is looked up in
 * the rule's client list under clients_lock. */
1481 static void call_to_add_rule_notifiers(struct mlx5_flow_rule *dst,
1484 struct mlx5_flow_namespace *ns;
1485 struct mlx5_flow_handler *iter_handler;
1486 struct fs_client_priv_data *iter_client;
1488 bool is_new_rule = list_first_entry(&fte->dests,
1489 struct mlx5_flow_rule,
1493 ns = get_ns_with_notifiers(&fte->base);
1497 down_read(&ns->notifiers_rw_sem);
1498 list_for_each_entry(iter_handler, &ns->list_notifiers,
1500 if (iter_handler->add_dst_cb) {
1502 mutex_lock(&dst->clients_lock);
1503 list_for_each_entry(
1504 iter_client, &dst->clients_data, list) {
1505 if (iter_client->fs_handler == iter_handler) {
1506 data = iter_client->client_dst_data;
1510 mutex_unlock(&dst->clients_lock);
1511 err = iter_handler->add_dst_cb(dst,
1514 iter_handler->client_context);
1519 up_read(&ns->notifiers_rw_sem);
/*
 * Counterpart of call_to_add_rule_notifiers: invoke every registered
 * del_dst_cb for rule @dst being removed from @fte.  ctx_changed tells the
 * client whether the fte lost its last destination (dests_size == 0).
 */
1522 static void call_to_del_rule_notifiers(struct mlx5_flow_rule *dst,
1525 	struct mlx5_flow_namespace *ns;
1526 	struct mlx5_flow_handler *iter_handler;
1527 	struct fs_client_priv_data *iter_client;
1529 	bool ctx_changed = (fte->dests_size == 0);
1531 	ns = get_ns_with_notifiers(&fte->base);
1534 	down_read(&ns->notifiers_rw_sem);
1535 	list_for_each_entry(iter_handler, &ns->list_notifiers,
/* Look up this handler's private data attached to the rule. */
1538 		mutex_lock(&dst->clients_lock);
1539 		list_for_each_entry(iter_client, &dst->clients_data, list) {
1540 			if (iter_client->fs_handler == iter_handler) {
1541 				data = iter_client->client_dst_data;
1545 		mutex_unlock(&dst->clients_lock);
1546 		if (iter_handler->del_dst_cb) {
1547 			iter_handler->del_dst_cb(dst, ctx_changed, data,
1548 						 iter_handler->client_context);
1551 	up_read(&ns->notifiers_rw_sem);
1554 /* fte should not be deleted while calling this function */
/*
 * Allocate a new rule object for destination @dest, link it onto @fte's
 * destination list and push the updated fte to firmware via
 * mlx5_cmd_fs_set_fte.  On firmware failure the rule is unlinked again and
 * an ERR_PTR is returned; on success the new rule is returned (the success
 * return path is elided from this view).
 */
1555 static struct mlx5_flow_rule *_fs_add_dst_fte(struct fs_fte *fte,
1556 					      struct mlx5_flow_group *fg,
1557 					      struct mlx5_flow_destination *dest)
1559 	struct mlx5_flow_table *ft;
1560 	struct mlx5_flow_rule *dst;
1563 	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1565 		return ERR_PTR(-ENOMEM);
1567 	memcpy(&dst->dest_attr, dest, sizeof(*dest));
1568 	dst->base.type = FS_TYPE_FLOW_DEST;
1569 	INIT_LIST_HEAD(&dst->clients_data);
1570 	mutex_init(&dst->clients_lock);
1571 	fs_get_parent(ft, fg);
1572 	/*Add dest to dests list- added as first element after the head*/
1573 	list_add_tail(&dst->base.list, &fte->dests);
/* Program the full destination list of this fte into firmware. */
1575 	err = mlx5_cmd_fs_set_fte(fs_get_dev(&ft->base),
1579 				  ft->id, fte->index, fg->id, fte->flow_tag,
1580 				  fte->action, fte->dests_size, &fte->dests);
/* Error unwind: remove the rule we just linked before returning. */
1584 	list_del(&dst->base.list);
1589 	list_del(&dst->base.list);
1592 	return ERR_PTR(err);
1595 static char *get_dest_name(struct mlx5_flow_destination *dest)
1597 char *name = kzalloc(sizeof(char) * 20, GFP_KERNEL);
1599 switch (dest->type) {
1600 case MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE:
1601 snprintf(name, 20, "dest_%s_%u", "flow_table",
1604 case MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT:
1605 snprintf(name, 20, "dest_%s_%u", "vport",
1608 case MLX5_FLOW_CONTEXT_DEST_TYPE_TIR:
1609 snprintf(name, 20, "dest_%s_%u", "tir", dest->tir_num);
1617 /* assumed fg is locked */
/*
 * Find the first unused fte index inside flow group @fg (whose fte list is
 * kept sorted by index) and set *prev to the list position after which a
 * new fte at that index should be inserted.
 */
1618 static unsigned int fs_get_free_fg_index(struct mlx5_flow_group *fg,
1619 					 struct list_head **prev)
1622 	unsigned int start = fg->start_index;
1627 	/* assumed list is sorted by index */
1628 	fs_for_each_fte(fte, fg) {
/* First gap in the sorted index sequence is the free slot. */
1629 		if (fte->index != start)
1633 		*prev = &fte->base.list;
/*
 * Allocate a new fte in @fg at the first free index; *prev receives the
 * list position for insertion (see fs_get_free_fg_index).  Caller holds
 * the group lock.
 */
1640 static struct fs_fte *fs_create_fte(struct mlx5_flow_group *fg,
1644 				    struct list_head **prev)
1649 	index = fs_get_free_fg_index(fg, prev);
1650 	fte = fs_alloc_fte(action, flow_tag, match_value, index);
/*
 * Attach @rule under @fte in the steering tree (named after its
 * destination), re-link it on the fte's destination list (fs_add_node
 * reinitializes the list head), and fire add-rule notifiers.
 */
1657 static void add_rule_to_tree(struct mlx5_flow_rule *rule,
1662 	dest_name = get_dest_name(&rule->dest_attr);
1663 	fs_add_node(&rule->base, &fte->base, dest_name, 1);
1664 	/* re-add to list, since fs_add_node reset our list */
1665 	list_add_tail(&rule->base.list, &fte->dests);
1667 	call_to_add_rule_notifiers(rule, fte);
/*
 * Tear down one rule (destination) @dst: unlink it from its fte under the
 * group lock and, if the fte still has other destinations, reprogram the
 * fte in firmware without this one.  Finally notify del-rule listeners.
 */
1670 static void fs_del_dst(struct mlx5_flow_rule *dst)
1672 	struct mlx5_flow_table *ft;
1673 	struct mlx5_flow_group *fg;
1676 	struct mlx5_core_dev *dev = fs_get_dev(&dst->base);
1677 	int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
1682 	match_value = mlx5_vzalloc(match_len);
1684 		mlx5_core_warn(dev, "failed to allocate inbox\n");
1688 	fs_get_parent(fte, dst);
1689 	fs_get_parent(fg, fte);
1690 	mutex_lock(&fg->base.lock);
1691 	memcpy(match_value, fte->val, sizeof(fte->val));
1692 	/* ft can't be changed as fg is locked */
1693 	fs_get_parent(ft, fg);
1694 	list_del(&dst->base.list);
/* If destinations remain, update firmware; if none, the fte itself is
 * deleted elsewhere (fs_del_fte), so no set_fte is issued here. */
1696 	if (fte->dests_size) {
1697 		err = mlx5_cmd_fs_set_fte(dev, ft->vport,
1698 					  &fte->status, match_value, ft->type,
1699 					  ft->id, fte->index, fg->id,
1700 					  fte->flow_tag, fte->action,
1701 					  fte->dests_size, &fte->dests);
1703 		mlx5_core_warn(dev, "%s can't delete dst %s\n",
1704 			       __func__, dst->base.name);
1708 	call_to_del_rule_notifiers(dst, fte);
1710 	mutex_unlock(&fg->base.lock);
1711 	kvfree(match_value);
/*
 * Delete flow table entry @fte from firmware (mlx5_cmd_fs_delete_fte);
 * logs a warning on failure.  Software-tree removal is handled by the
 * generic node-removal path that calls this.
 */
1714 static void fs_del_fte(struct fs_fte *fte)
1716 	struct mlx5_flow_table *ft;
1717 	struct mlx5_flow_group *fg;
1719 	struct mlx5_core_dev *dev;
1721 	fs_get_parent(fg, fte);
1722 	fs_get_parent(ft, fg);
1724 	dev = fs_get_dev(&ft->base);
1727 	err = mlx5_cmd_fs_delete_fte(dev, ft->vport, &fte->status,
1728 				     ft->type, ft->id, fte->index);
1730 		mlx5_core_warn(dev, "flow steering can't delete fte %s\n",
1736 /* assuming parent fg is locked */
1737 /* Add dst algorithm */
/*
 * Add destination @dest inside group @fg: first try to merge into an
 * existing fte with identical match value / action / flow_tag; otherwise
 * create a new fte (bounded by fg->max_ftes), add the destination to it,
 * insert it into the tree and notify listeners.  Returns the new rule or
 * an ERR_PTR (-ENOSPC when the group is full).
 */
1738 static struct mlx5_flow_rule *fs_add_dst_fg(struct mlx5_flow_group *fg,
1742 				       struct mlx5_flow_destination *dest)
1745 	struct mlx5_flow_rule *dst;
1746 	struct mlx5_flow_table *ft;
1747 	struct list_head *prev;
1750 	mutex_lock(&fg->base.lock);
1751 	fs_for_each_fte(fte, fg) {
1752 		/* TODO: Check of size against PRM max size */
1753 		mutex_lock(&fte->base.lock);
/* Reuse path: identical match/action/tag → just append the dest. */
1754 		if (fs_match_exact_val(&fg->mask, match_value, &fte->val) &&
1755 		    action == fte->action && flow_tag == fte->flow_tag) {
1756 			dst = _fs_add_dst_fte(fte, fg, dest);
1757 			mutex_unlock(&fte->base.lock);
1762 		mutex_unlock(&fte->base.lock);
1765 	fs_get_parent(ft, fg);
1766 	if (fg->num_ftes == fg->max_ftes) {
1767 		dst = ERR_PTR(-ENOSPC);
1771 	fte = fs_create_fte(fg, match_value, action, flow_tag, &prev);
1776 	dst = _fs_add_dst_fte(fte, fg, dest);
1784 	snprintf(fte_name, sizeof(fte_name), "fte%u", fte->index);
1785 	/* Add node to tree */
1786 	fs_add_node(&fte->base, &fg->base, fte_name, 0);
1787 	list_add(&fte->base.list, prev);
1789 	add_rule_to_tree(dst, fte);
1791 	mutex_unlock(&fg->base.lock);
/*
 * Table-level add: find a flow group whose mask exactly matches the
 * requested criteria and add the rule there; when no group matches (or
 * the matching one is full, -ENOSPC), autogroup a new one and retry.
 * A failed add into a fresh autogroup removes it again (refcount 0).
 */
1795 static struct mlx5_flow_rule *fs_add_dst_ft(struct mlx5_flow_table *ft,
1796 					    u8 match_criteria_enable,
1797 					    u32 *match_criteria,
1799 					    u8 action, u32 flow_tag,
1800 					    struct mlx5_flow_destination *dest)
1802 	/*? where dst_entry is allocated*/
1803 	struct mlx5_flow_group *g;
1804 	struct mlx5_flow_rule *dst;
1807 	mutex_lock(&ft->base.lock);
1808 	fs_for_each_fg(g, ft)
1809 		if (fs_match_exact_mask(g->mask.match_criteria_enable,
1810 					match_criteria_enable,
1811 					g->mask.match_criteria,
/* Table lock dropped before the per-group add (group has its own lock). */
1813 			mutex_unlock(&ft->base.lock);
1815 			dst = fs_add_dst_fg(g, match_value,
1816 					    action, flow_tag, dest);
1817 			if (PTR_ERR(dst) && PTR_ERR(dst) != -ENOSPC)
1820 	mutex_unlock(&ft->base.lock);
1822 	g = create_autogroup(ft, match_criteria_enable, match_criteria);
1828 	dst = fs_add_dst_fg(g, match_value,
1829 			    action, flow_tag, dest);
1831 	/* Remove assumes refcount > 0 and autogroup creates a group
1832 	 * with a refcount = 0.
1835 	fs_remove_node(&g->base);
/*
 * Public API: add a flow rule to @ft.  Takes the notifier namespace's
 * dests semaphore for read so concurrent rule iteration/unregistration
 * (which takes it for write) is excluded.  Returns the rule or ERR_PTR.
 */
1844 struct mlx5_flow_rule *
1845 mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1846 		   u8 match_criteria_enable,
1847 		   u32 *match_criteria,
1851 		   struct mlx5_flow_destination *dest)
1853 	struct mlx5_flow_rule *dst;
1854 	struct mlx5_flow_namespace *ns;
1856 	ns = get_ns_with_notifiers(&ft->base);
1858 	down_read(&ns->dests_rw_sem);
1859 	dst = fs_add_dst_ft(ft, match_criteria_enable, match_criteria,
1860 			    match_value, action, flow_tag, dest);
1862 	up_read(&ns->dests_rw_sem);
1868 EXPORT_SYMBOL(mlx5_add_flow_rule);
/*
 * Public API: delete a flow rule.  fs_remove_node drops the tree node,
 * which triggers fs_del_dst/fs_del_fte as needed; serialized against rule
 * iteration via the namespace dests semaphore (read side).
 */
1870 void mlx5_del_flow_rule(struct mlx5_flow_rule *dst)
1872 	struct mlx5_flow_namespace *ns;
1874 	ns = get_ns_with_notifiers(&dst->base);
1876 	down_read(&ns->dests_rw_sem);
1877 	fs_remove_node(&dst->base);
1879 	up_read(&ns->dests_rw_sem);
1881 EXPORT_SYMBOL(mlx5_del_flow_rule);
/* Node names for the various root namespaces, plus default priority
 * limits (max flow tables / max sub-namespaces per priority). */
1883 #define MLX5_CORE_FS_ROOT_NS_NAME "root"
1884 #define MLX5_CORE_FS_ESW_EGRESS_ACL "esw_egress_root"
1885 #define MLX5_CORE_FS_ESW_INGRESS_ACL "esw_ingress_root"
1886 #define MLX5_CORE_FS_FDB_ROOT_NS_NAME "fdb_root"
1887 #define MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME "sniffer_rx_root"
1888 #define MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME "sniffer_tx_root"
1889 #define MLX5_CORE_FS_PRIO_MAX_FT 4
1890 #define MLX5_CORE_FS_PRIO_MAX_NS 1
/*
 * Allocate a priority node under namespace @ns, register it in the tree
 * (fs_add_node) and append it to ns->prios.  Returns the new prio or
 * ERR_PTR(-ENOMEM).
 */
1892 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
1893 				      unsigned prio, int max_ft,
1894 				      const char *name, u8 flags)
1896 	struct fs_prio *fs_prio;
1898 	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
1900 		return ERR_PTR(-ENOMEM);
1902 	fs_prio->base.type = FS_TYPE_PRIO;
1903 	fs_add_node(&fs_prio->base, &ns->base, name, 1);
1904 	fs_prio->max_ft = max_ft;
1905 	fs_prio->max_ns = MLX5_CORE_FS_PRIO_MAX_NS;
1906 	fs_prio->prio = prio;
1907 	fs_prio->flags = flags;
1908 	list_add_tail(&fs_prio->base.list, &ns->prios);
1909 	INIT_LIST_HEAD(&fs_prio->objs);
1910 	mutex_init(&fs_prio->shared_lock);
/*
 * Tear down dev->root_ns bottom-up: first the sub-priorities inside each
 * nested namespace, then the objects inside each top-level priority, then
 * the top-level priorities themselves, and finally the root namespace
 * node.  Ordering matters so no node is removed while children remain.
 */
1915 static void cleanup_root_ns(struct mlx5_core_dev *dev)
1917 	struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
1918 	struct fs_prio *iter_prio;
/* Pass 1: innermost prios (prio -> ns -> prio2). */
1924 	fs_for_each_prio(iter_prio, &root_ns->ns) {
1925 		struct mlx5_flow_namespace *iter_ns;
1927 		fs_for_each_ns(iter_ns, iter_prio) {
1928 			while (!list_empty(&iter_ns->prios)) {
1929 				struct fs_base *iter_prio2 =
1930 					list_first_entry(&iter_ns->prios,
1934 				fs_remove_node(iter_prio2);
/* Pass 2: objects (namespaces) directly under each top-level prio. */
1940 	fs_for_each_prio(iter_prio, &root_ns->ns) {
1941 		while (!list_empty(&iter_prio->objs)) {
1942 			struct fs_base *iter_ns =
1943 				list_first_entry(&iter_prio->objs,
1947 			fs_remove_node(iter_ns);
/* Pass 3: the top-level prios themselves. */
1951 	while (!list_empty(&root_ns->ns.prios)) {
1952 		struct fs_base *iter_prio =
1953 			list_first_entry(&root_ns->ns.prios,
1957 		fs_remove_node(iter_prio);
1960 	fs_remove_node(&root_ns->ns.base);
1961 	dev->root_ns = NULL;
/*
 * Tear down a single-priority root namespace (FDB/ACL/sniffer roots):
 * remove its one priority (if present), then the namespace node itself.
 * NOTE(review): callers pass pointers that may be NULL (see
 * mlx5_cleanup_fs); a NULL guard presumably exists in the elided lines —
 * confirm against the full source.
 */
1964 static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
1965 					struct mlx5_flow_root_namespace *root_ns)
1967 	struct fs_base *prio;
1972 	if (!list_empty(&root_ns->ns.prios)) {
1973 		prio = list_first_entry(&root_ns->ns.prios,
1976 		fs_remove_node(prio);
1978 	fs_remove_node(&root_ns->ns.base);
/*
 * Public teardown entry point: destroy every flow-steering root namespace
 * created by mlx5_init_fs (also used as its error-unwind path, so any of
 * the single-prio roots may still be NULL here).
 */
1982 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
1984 	cleanup_root_ns(dev);
1985 	cleanup_single_prio_root_ns(dev, dev->sniffer_rx_root_ns);
1986 	cleanup_single_prio_root_ns(dev, dev->sniffer_tx_root_ns);
1987 	cleanup_single_prio_root_ns(dev, dev->fdb_root_ns);
1988 	cleanup_single_prio_root_ns(dev, dev->esw_egress_root_ns);
1989 	cleanup_single_prio_root_ns(dev, dev->esw_ingress_root_ns);
/*
 * Initialize the generic fields of a namespace object: type tag, the two
 * rw-semaphores (rule destinations / notifier list) and the two lists
 * (child priorities / registered notifiers).
 */
1992 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
1995 	ns->base.type = FS_TYPE_NAMESPACE;
1996 	init_rwsem(&ns->dests_rw_sem);
1997 	init_rwsem(&ns->notifiers_rw_sem);
1998 	INIT_LIST_HEAD(&ns->prios);
1999 	INIT_LIST_HEAD(&ns->list_notifiers);
/*
 * Allocate and initialize a root namespace of @table_type, add its inner
 * namespace as a tree root node named @name.  Returns the root namespace
 * or NULL on allocation failure (mlx5_vzalloc).
 */
2004 static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
2009 	struct mlx5_flow_root_namespace *root_ns;
2010 	struct mlx5_flow_namespace *ns;
2012 	/* create the root namespace */
2013 	root_ns = mlx5_vzalloc(sizeof(*root_ns));
2018 	root_ns->table_type = table_type;
2019 	mutex_init(&root_ns->fs_chain_lock);
2022 	fs_init_namespace(ns);
/* Root node: no parent. */
2023 	fs_add_node(&ns->base, NULL, name, 1);
/*
 * Create the FDB root namespace with a single priority holding one flow
 * table.  Returns 0 on success, -ENOMEM / PTR_ERR on failure.
 */
2030 static int init_fdb_root_ns(struct mlx5_core_dev *dev)
2032 	struct fs_prio *prio;
2034 	dev->fdb_root_ns = create_root_ns(dev, FS_FT_FDB,
2035 					  MLX5_CORE_FS_FDB_ROOT_NS_NAME);
2036 	if (!dev->fdb_root_ns)
2040 	prio = fs_create_prio(&dev->fdb_root_ns->ns, 0, 1, "fdb_prio", 0);
2042 		return PTR_ERR(prio);
/* Upper bound on e-switch vports; sizes the ACL priorities below. */
2047 #define MAX_VPORTS 128
/*
 * Create the e-switch egress ACL root namespace: one priority sized for
 * one table per vport (MAX_VPORTS).
 */
2049 static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
2051 	struct fs_prio *prio;
2053 	dev->esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL,
2054 						 MLX5_CORE_FS_ESW_EGRESS_ACL);
2055 	if (!dev->esw_egress_root_ns)
2059 	prio = fs_create_prio(&dev->esw_egress_root_ns->ns, 0, MAX_VPORTS,
2060 			      "esw_egress_prio", 0);
2062 		return PTR_ERR(prio);
/*
 * Create the e-switch ingress ACL root namespace: one priority sized for
 * one table per vport (MAX_VPORTS).
 */
2067 static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
2069 	struct fs_prio *prio;
2071 	dev->esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL,
2072 						  MLX5_CORE_FS_ESW_INGRESS_ACL);
2073 	if (!dev->esw_ingress_root_ns)
2077 	prio = fs_create_prio(&dev->esw_ingress_root_ns->ns, 0, MAX_VPORTS,
2078 			      "esw_ingress_prio", 0);
2080 		return PTR_ERR(prio);
/*
 * Create the sniffer RX root namespace with a single one-table priority.
 */
2085 static int init_sniffer_rx_root_ns(struct mlx5_core_dev *dev)
2087 	struct fs_prio *prio;
2089 	dev->sniffer_rx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_RX,
2090 					     MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME);
2091 	if (!dev->sniffer_rx_root_ns)
2095 	prio = fs_create_prio(&dev->sniffer_rx_root_ns->ns, 0, 1,
2098 		return PTR_ERR(prio);
/*
 * Create the sniffer TX root namespace with a single one-table priority.
 */
2104 static int init_sniffer_tx_root_ns(struct mlx5_core_dev *dev)
2106 	struct fs_prio *prio;
2108 	dev->sniffer_tx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_TX,
2109 					     MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME);
2110 	if (!dev->sniffer_tx_root_ns)
2114 	prio = fs_create_prio(&dev->sniffer_tx_root_ns->ns, 0, 1,
2117 		return PTR_ERR(prio);
/*
 * Allocate a child namespace under priority @prio, register it in the
 * tree and append it to the priority's object list.  Returns the new
 * namespace or ERR_PTR(-ENOMEM).
 */
2122 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2125 	struct mlx5_flow_namespace *ns;
2127 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2129 		return ERR_PTR(-ENOMEM);
2131 	fs_init_namespace(ns);
2132 	fs_add_node(&ns->base, &prio->base, name, 1);
2133 	list_add_tail(&ns->base.list, &prio->objs);
/* Extract a single capability bit at @offset from the device's current
 * MLX5_CAP_FLOW_TABLE capability block (big-endian dword array). */
2138 #define FLOW_TABLE_BIT_SZ 1
2139 #define GET_FLOW_TABLE_CAP(dev, offset) \
2140 	((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) +	\
2142 	(32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
/*
 * Return whether the device advertises every capability bit listed in
 * @caps (used to gate creation of a priority in the static init tree).
 */
2144 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2148 	for (i = 0; i < caps->arr_sz; i++) {
2149 		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
/*
 * Recursively instantiate one node of the static init_tree description:
 * a PRIO node becomes an fs_prio (skipped when the device lacks the
 * required ft level or caps), a NAMESPACE node becomes a child namespace;
 * then recurse into the node's children with the new object as parent.
 */
2155 static int _init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
2156 		struct init_tree_node *node, struct fs_base *base_parent,
2157 		struct init_tree_node *tree_parent)
2159 	struct mlx5_flow_namespace *fs_ns;
2160 	struct fs_prio *fs_prio;
2162 	struct fs_base *base;
2166 	if (node->type == FS_TYPE_PRIO) {
/* Gate on device support; silently skip unsupported priorities. */
2167 		if ((node->min_ft_level > max_ft_level) ||
2168 		    !has_required_caps(dev, &node->caps))
2171 		fs_get_obj(fs_ns, base_parent);
/* Priority index = position of this node within its parent's array. */
2172 		priority = node - tree_parent->children;
2173 		fs_prio = fs_create_prio(fs_ns, priority,
2175 					 node->name, node->flags);
2176 		if (IS_ERR(fs_prio)) {
2177 			err = PTR_ERR(fs_prio);
2180 		base = &fs_prio->base;
2181 	} else if (node->type == FS_TYPE_NAMESPACE) {
2182 		fs_get_obj(fs_prio, base_parent);
2183 		fs_ns = fs_create_namespace(fs_prio, node->name);
2184 		if (IS_ERR(fs_ns)) {
2185 			err = PTR_ERR(fs_ns);
2188 		base = &fs_ns->base;
2192 	for (i = 0; i < node->ar_size; i++) {
2193 		err = _init_root_tree(dev, max_ft_level, &node->children[i], base,
/*
 * Top-level driver for _init_root_tree: instantiate every child of the
 * static root description under namespace @parent.
 */
2202 static int init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
2203 		struct init_tree_node *node, struct fs_base *parent)
2206 	struct mlx5_flow_namespace *fs_ns;
2209 	fs_get_obj(fs_ns, parent);
2210 	for (i = 0; i < node->ar_size; i++) {
2211 		err = _init_root_tree(dev, max_ft_level,
2212 				      &node->children[i], &fs_ns->base, node);
2219 static int sum_max_ft_in_prio(struct fs_prio *prio);
/*
 * Sum max_ft over all priorities of @ns (mutually recursive with
 * sum_max_ft_in_prio, which descends into nested namespaces).
 */
2220 static int sum_max_ft_in_ns(struct mlx5_flow_namespace *ns)
2222 	struct fs_prio *prio;
2225 	fs_for_each_prio(prio, ns) {
2226 		sum += sum_max_ft_in_prio(prio);
/*
 * Return the effective max_ft of @prio: its own max_ft when it holds flow
 * tables directly, otherwise the sum over its nested namespaces.
 */
2231 static int sum_max_ft_in_prio(struct fs_prio *prio)
2235 	struct mlx5_flow_namespace *ns;
2238 		return prio->max_ft;
2240 	fs_for_each_ns_or_ft(it, prio) {
2241 		if (it->type == FS_TYPE_FLOW_TABLE)
2245 		sum += sum_max_ft_in_ns(ns);
/*
 * Recompute max_ft aggregates over the whole namespace after the tree has
 * been built (relies on sum_max_ft_in_prio's side effects on elided
 * lines).
 */
2251 static void set_max_ft(struct mlx5_flow_namespace *ns)
2253 	struct fs_prio *prio;
2258 	fs_for_each_prio(prio, ns)
2259 		sum_max_ft_in_prio(prio);
/*
 * Build the NIC-RX root namespace from the static root_fs description,
 * bounded by the device's max flow-table level capability, then fix up
 * the max_ft aggregates.
 */
2262 static int init_root_ns(struct mlx5_core_dev *dev)
2264 	int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
2265 					      flow_table_properties_nic_receive.
2268 	dev->root_ns = create_root_ns(dev, FS_FT_NIC_RX,
2269 				      MLX5_CORE_FS_ROOT_NS_NAME);
2270 	if (IS_ERR_OR_NULL(dev->root_ns))
2274 	if (init_root_tree(dev, max_ft_level, &root_fs, &dev->root_ns->ns.base))
2277 	set_max_ft(&dev->root_ns->ns);
/*
 * Return the match_criteria_enable bits of the flow group a rule belongs
 * to (rule -> fte -> group: two parent hops).
 */
2284 u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule)
2286 	struct fs_base *pbase;
2287 	struct mlx5_flow_group *fg;
2289 	pbase = rule->base.parent;
2291 	pbase = pbase->parent;
2294 	fs_get_obj(fg, pbase);
2295 	return fg->mask.match_criteria_enable;
/*
 * Copy the match value of the fte a rule belongs to into @match_value
 * (caller provides a buffer of at least sizeof(fte->val) bytes).
 */
2298 void mlx5_get_match_value(u32 *match_value,
2299 			  struct mlx5_flow_rule *rule)
2301 	struct fs_base *pbase;
2304 	pbase = rule->base.parent;
2306 	fs_get_obj(fte, pbase);
2308 	memcpy(match_value, fte->val, sizeof(fte->val));
/*
 * Copy the match criteria mask of the rule's flow group into
 * @match_criteria (two parent hops: rule -> fte -> group).
 */
2311 void mlx5_get_match_criteria(u32 *match_criteria,
2312 			     struct mlx5_flow_rule *rule)
2314 	struct fs_base *pbase;
2315 	struct mlx5_flow_group *fg;
2317 	pbase = rule->base.parent;
2319 	pbase = pbase->parent;
2322 	fs_get_obj(fg, pbase);
2323 	memcpy(match_criteria, &fg->mask.match_criteria,
2324 	       sizeof(fg->mask.match_criteria));
/*
 * Initialize all flow-steering root namespaces for @dev (NIC RX when the
 * nic_flow_table cap is set, plus FDB, e-switch ACLs and sniffers as
 * supported).  On any failure, mlx5_cleanup_fs unwinds whatever was
 * already created (it tolerates NULL roots).
 */
2327 int mlx5_init_fs(struct mlx5_core_dev *dev)
2331 	if (MLX5_CAP_GEN(dev, nic_flow_table)) {
2332 		err = init_root_ns(dev);
2337 	err = init_fdb_root_ns(dev);
2341 	err = init_egress_acl_root_ns(dev);
2345 	err = init_ingress_acl_root_ns(dev);
2349 	err = init_sniffer_tx_root_ns(dev);
2353 	err = init_sniffer_rx_root_ns(dev);
2359 	mlx5_cleanup_fs(dev);
/*
 * Map a namespace-type enum to the corresponding namespace object:
 * dedicated roots (FDB, ACLs, sniffers) are returned directly when
 * present; BYPASS/KERNEL/LEFTOVERS resolve to a priority index inside the
 * NIC-RX root and return that priority's first namespace object.
 *
 * NOTE(review): `fs_prio` below is declared `static`, making it shared
 * mutable state across concurrent callers — it is assigned before use, so
 * it behaves like a local, but the `static` looks accidental and is a
 * latent race; recommend dropping the qualifier.
 */
2363 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2364 						  enum mlx5_flow_namespace_type type)
2366 	struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
2368 	static struct fs_prio *fs_prio;
2369 	struct mlx5_flow_namespace *ns;
2372 	case MLX5_FLOW_NAMESPACE_BYPASS:
2375 	case MLX5_FLOW_NAMESPACE_KERNEL:
2378 	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2381 	case MLX5_FLOW_NAMESPACE_FDB:
2382 		if (dev->fdb_root_ns)
2383 			return &dev->fdb_root_ns->ns;
2386 	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2387 		if (dev->esw_egress_root_ns)
2388 			return &dev->esw_egress_root_ns->ns;
2391 	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2392 		if (dev->esw_ingress_root_ns)
2393 			return &dev->esw_ingress_root_ns->ns;
2396 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2397 		if (dev->sniffer_rx_root_ns)
2398 			return &dev->sniffer_rx_root_ns->ns;
2401 	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2402 		if (dev->sniffer_tx_root_ns)
2403 			return &dev->sniffer_tx_root_ns->ns;
/* Priority-indexed lookup inside the NIC-RX root namespace. */
2413 	fs_prio = find_prio(&root_ns->ns, prio);
2417 	ns = list_first_entry(&fs_prio->objs,
/*
 * Attach (or update) @fs_handler's private data on @rule.  If the handler
 * already has an entry in the rule's client list, only the data pointer
 * is replaced; otherwise a new fs_client_priv_data entry is allocated and
 * prepended.  Returns 0 on success, negative errno on allocation failure.
 */
2426 int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule,
2427 				  struct mlx5_flow_handler *fs_handler,
2430 	struct fs_client_priv_data *priv_data;
2432 	mutex_lock(&rule->clients_lock);
2433 	/*Check that hanlder isn't exists in the list already*/
2434 	list_for_each_entry(priv_data, &rule->clients_data, list) {
2435 		if (priv_data->fs_handler == fs_handler) {
2436 			priv_data->client_dst_data = client_data;
2440 	priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
2442 		mutex_unlock(&rule->clients_lock);
2446 	priv_data->client_dst_data = client_data;
2447 	priv_data->fs_handler = fs_handler;
2448 	list_add(&priv_data->list, &rule->clients_data);
2451 	mutex_unlock(&rule->clients_lock);
/*
 * rule_event_fn callback used during notifier unregistration: detach the
 * unregistering handler's private-data entry from @rule, if present.
 * @context carries the mlx5_flow_handler being removed.
 */
2456 static int remove_from_clients(struct mlx5_flow_rule *rule,
2461 	struct fs_client_priv_data *iter_client;
2462 	struct fs_client_priv_data *temp_client;
2463 	struct mlx5_flow_handler *handler =  (struct
2464 						mlx5_flow_handler*)context;
2466 	mutex_lock(&rule->clients_lock);
2467 	list_for_each_entry_safe(iter_client, temp_client,
2468 				 &rule->clients_data, list) {
2469 		if (iter_client->fs_handler == handler) {
2470 			list_del(&iter_client->list);
2475 	mutex_unlock(&rule->clients_lock);
/*
 * Register add/del rule callbacks on the namespace selected by @ns_type.
 * Returns the new handler (to be passed to mlx5_unregister_rule_notifier)
 * or ERR_PTR on invalid namespace / allocation failure.
 * NOTE(review): handler->ns is read by the unregister path; its
 * assignment is presumably in the elided lines — confirm.
 */
2480 struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
2481 						      enum mlx5_flow_namespace_type ns_type,
2482 						      rule_event_fn add_cb,
2483 						      rule_event_fn del_cb,
2486 	struct mlx5_flow_namespace *ns;
2487 	struct mlx5_flow_handler *handler;
2489 	ns = mlx5_get_flow_namespace(dev, ns_type);
2491 		return ERR_PTR(-EINVAL);
2493 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
2495 		return ERR_PTR(-ENOMEM);
2497 	handler->add_dst_cb = add_cb;
2498 	handler->del_dst_cb = del_cb;
2499 	handler->client_context = context;
2501 	down_write(&ns->notifiers_rw_sem);
2502 	list_add_tail(&handler->list, &ns->list_notifiers);
2503 	up_write(&ns->notifiers_rw_sem);
2508 static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
2509 rule_event_fn add_rule_cb,
/*
 * Unregister a rule notifier: under both namespace semaphores (write),
 * strip the handler's private data from every existing rule, then remove
 * it from the notifier list.  Freeing the handler itself is elided here.
 */
2512 void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler)
2514 	struct mlx5_flow_namespace *ns = handler->ns;
2516 	/*Remove from dst's clients*/
2517 	down_write(&ns->dests_rw_sem);
2518 	down_write(&ns->notifiers_rw_sem);
2519 	iterate_rules_in_ns(ns, remove_from_clients, handler);
2520 	list_del(&handler->list);
2521 	up_write(&ns->notifiers_rw_sem);
2522 	up_write(&ns->dests_rw_sem);
/*
 * Walk every rule in flow table @ft (group -> fte -> rule, each level
 * under its own lock) and invoke @add_rule_cb on it.  A reference is
 * taken on each rule around the callback (fs_get / fs_put_parent_locked).
 */
2526 static void iterate_rules_in_ft(struct mlx5_flow_table *ft,
2527 				rule_event_fn add_rule_cb,
2530 	struct mlx5_flow_group *iter_fg;
2531 	struct fs_fte *iter_fte;
2532 	struct mlx5_flow_rule *iter_rule;
2536 	mutex_lock(&ft->base.lock);
2537 	fs_for_each_fg(iter_fg, ft) {
2538 		mutex_lock(&iter_fg->base.lock);
2539 		fs_for_each_fte(iter_fte, iter_fg) {
2540 			mutex_lock(&iter_fte->base.lock);
2542 			fs_for_each_dst(iter_rule, iter_fte) {
2543 				fs_get(&iter_rule->base);
2544 				err = add_rule_cb(iter_rule,
2548 				fs_put_parent_locked(&iter_rule->base);
/* Only the first rule of an fte counts as "new" to the callback. */
2551 				is_new_rule = false;
2553 			mutex_unlock(&iter_fte->base.lock);
2557 		mutex_unlock(&iter_fg->base.lock);
2561 	mutex_unlock(&ft->base.lock);
/*
 * Walk every object under priority @prio: flow tables are iterated
 * directly, nested namespaces recurse via iterate_rules_in_ns.
 */
2564 static void iterate_rules_in_prio(struct fs_prio *prio,
2565 				  rule_event_fn add_rule_cb,
2570 	mutex_lock(&prio->base.lock);
2571 	fs_for_each_ns_or_ft(it, prio) {
2572 		if (it->type == FS_TYPE_FLOW_TABLE) {
2573 			struct mlx5_flow_table *ft;
2576 			iterate_rules_in_ft(ft, add_rule_cb, context);
2578 			struct mlx5_flow_namespace *ns;
2581 			iterate_rules_in_ns(ns, add_rule_cb, context);
2584 	mutex_unlock(&prio->base.lock);
/*
 * Walk every priority of namespace @ns under the namespace lock,
 * delegating to iterate_rules_in_prio (mutually recursive for nested
 * namespaces).
 */
2587 static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
2588 				rule_event_fn add_rule_cb,
2591 	struct fs_prio *iter_prio;
2593 	mutex_lock(&ns->base.lock);
2594 	fs_for_each_prio(iter_prio, ns) {
2595 		iterate_rules_in_prio(iter_prio, add_rule_cb, context);
2597 	mutex_unlock(&ns->base.lock);
/*
 * Public API: invoke @add_rule_cb on every existing rule in @ns.  Holds
 * dests_rw_sem for write (no rule add/del during iteration) and
 * notifiers_rw_sem for read.
 */
2600 void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
2601 					 rule_event_fn add_rule_cb,
2604 	down_write(&ns->dests_rw_sem);
2605 	down_read(&ns->notifiers_rw_sem);
2606 	iterate_rules_in_ns(ns, add_rule_cb, context);
2607 	up_read(&ns->notifiers_rw_sem);
2608 	up_write(&ns->dests_rw_sem);
/*
 * Free a rules list built by get_roce_flow_rules: unlink every node (the
 * per-node kfree and the final free of @rules_list are in elided lines).
 */
2612 void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list)
2614 	struct mlx5_flow_rule_node *iter_node;
2615 	struct mlx5_flow_rule_node *temp_node;
2617 	list_for_each_entry_safe(iter_node, temp_node, &rules_list->head, list) {
2618 		list_del(&iter_node->list);
2625 #define ROCEV1_ETHERTYPE 0x8915
/*
 * Append one match rule for RoCE v1 traffic (outer ethertype 0x8915) to
 * @rules_list.  Returns 0 on success, negative errno on allocation
 * failure.
 */
2626 static int set_rocev1_rules(struct list_head *rules_list)
2628 	struct mlx5_flow_rule_node *rocev1_rule;
2630 	rocev1_rule = kzalloc(sizeof(*rocev1_rule), GFP_KERNEL);
2634 	rocev1_rule->match_criteria_enable =
2635 		1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2636 	MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_criteria, ethertype,
2638 	MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_value, ethertype,
2641 	list_add_tail(&rocev1_rule->list, rules_list);
2646 #define ROCEV2_UDP_PORT 4791
/*
 * Append two match rules for RoCE v2 traffic (UDP dport 4791 over IPv4
 * and over IPv6) to @rules_list.  Returns 0 on success, negative errno
 * on allocation failure (the second-allocation unwind is elided).
 */
2647 static int set_rocev2_rules(struct list_head *rules_list)
2649 	struct mlx5_flow_rule_node *ipv4_rule;
2650 	struct mlx5_flow_rule_node *ipv6_rule;
2652 	ipv4_rule = kzalloc(sizeof(*ipv4_rule), GFP_KERNEL);
2656 	ipv6_rule = kzalloc(sizeof(*ipv6_rule), GFP_KERNEL);
/* IPv4 + UDP + dport 4791 */
2662 	ipv4_rule->match_criteria_enable =
2663 		1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2664 	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ethertype,
2666 	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ethertype,
2668 	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ip_protocol,
2670 	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ip_protocol,
2672 	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, udp_dport,
2674 	MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, udp_dport,
/* IPv6 + UDP + dport 4791 */
2677 	ipv6_rule->match_criteria_enable =
2678 		1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2679 	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ethertype,
2681 	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ethertype,
2683 	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ip_protocol,
2685 	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ip_protocol,
2687 	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, udp_dport,
2689 	MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, udp_dport,
2692 	list_add_tail(&ipv4_rule->list, rules_list);
2693 	list_add_tail(&ipv6_rule->list, rules_list);
2699 struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode)
2702 struct mlx5_flow_rules_list *rules_list =
2703 kzalloc(sizeof(*rules_list), GFP_KERNEL);
2708 INIT_LIST_HEAD(&rules_list->head);
2710 if (roce_mode & MLX5_ROCE_VERSION_1_CAP) {
2711 err = set_rocev1_rules(&rules_list->head);
2715 if (roce_mode & MLX5_ROCE_VERSION_2_CAP)
2716 err = set_rocev2_rules(&rules_list->head);
2723 mlx5_del_flow_rules_list(rules_list);