2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <linux/module.h>
29 #include <dev/mlx5/driver.h>
30 #include "mlx5_core.h"
32 #include <linux/string.h>
33 #include <linux/compiler.h>
/* Helpers for declaratively building the static flow-steering priority tree
 * (root_fs below).  NOTE(review): this copy of the file is missing interior
 * lines of several macro bodies (e.g. the .name assignments). */
/* Number of elements in a brace-enclosed init_tree_node initializer list. */
35 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
36 sizeof(struct init_tree_node))
/* Build a FS_TYPE_PRIO node; variadic args become its child nodes. */
38 #define ADD_PRIO(name_val, flags_val, min_level_val, max_ft_val, caps_val, \
39 ...) {.type = FS_TYPE_PRIO,\
41 .min_ft_level = min_level_val,\
43 .max_ft = max_ft_val,\
45 .children = (struct init_tree_node[]) {__VA_ARGS__},\
46 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
/* Prio holding flow tables directly: min level 0, no capability reqs. */
49 #define ADD_FT_PRIO(name_val, flags_val, max_ft_val, ...)\
50 ADD_PRIO(name_val, flags_val, 0, max_ft_val, {},\
/* Build a FS_TYPE_NAMESPACE node wrapping the given child prios. */
53 #define ADD_NS(name_val, ...) {.type = FS_TYPE_NAMESPACE,\
55 .children = (struct init_tree_node[]) {__VA_ARGS__},\
56 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
/* Number of elements in a brace-enclosed list of capability bit offsets. */
59 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
/* Bit offset of a field inside the flow_table_nic_cap HCA capability. */
62 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
/* Capability list a tree node requires before it is instantiated. */
64 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
65 .caps = (long[]) {__VA_ARGS__}}
/* Flow-table limits and minimum start levels for the default priority tree. */
#define BYPASS_MAX_FT 5
#define BYPASS_PRIO_MAX_FT 1
#define KERNEL_MAX_FT 3
#define LEFTOVER_MAX_FT 1
/* NOTE(review): "KENREL" is a historical typo for KERNEL; the name is kept
 * because other code in this file references it. */
#define KENREL_MIN_LEVEL 3
/* Parenthesized so the expansion is safe in any expression context
 * (the previous unparenthesized form would mis-bind under, e.g., '*'). */
#define LEFTOVER_MIN_LEVEL (KENREL_MIN_LEVEL + 1)
#define BYPASS_MIN_LEVEL (MLX5_NUM_BYPASS_FTS + LEFTOVER_MIN_LEVEL)
/* One node of the compile-time template tree used to create the default
 * namespace/priority hierarchy.  NOTE(review): fields such as type/name/flags
 * appear to be missing from this copy. */
79 struct init_tree_node {
82 struct init_tree_node *children;
84 struct node_caps caps;
/* Template of the default NIC-RX steering tree: a by-pass namespace with nine
 * priorities (gated on flow_modify_en/modify_root caps), a kernel prio, and a
 * shared "leftovers" prio.  NOTE(review): the declaration line of this
 * initializer is missing from this copy. */
90 .type = FS_TYPE_NAMESPACE,
93 .children = (struct init_tree_node[]) {
94 ADD_PRIO("by_pass_prio", 0, BYPASS_MIN_LEVEL, 0,
95 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
96 FS_CAP(flow_table_properties_nic_receive.modify_root)),
98 ADD_FT_PRIO("prio0", 0,
100 ADD_FT_PRIO("prio1", 0,
102 ADD_FT_PRIO("prio2", 0,
104 ADD_FT_PRIO("prio3", 0,
106 ADD_FT_PRIO("prio4", 0,
108 ADD_FT_PRIO("prio5", 0,
110 ADD_FT_PRIO("prio6", 0,
112 ADD_FT_PRIO("prio7", 0,
114 ADD_FT_PRIO("prio-mcast", 0,
115 BYPASS_PRIO_MAX_FT))),
116 ADD_PRIO("kernel_prio", 0, KENREL_MIN_LEVEL, 0, {},
118 ADD_FT_PRIO("prio_kernel-0", 0,
120 ADD_PRIO("leftovers_prio", MLX5_CORE_FS_PRIO_SHARED,
121 LEFTOVER_MIN_LEVEL, 0,
122 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
123 FS_CAP(flow_table_properties_nic_receive.modify_root)),
124 ADD_NS("leftover_ns",
125 ADD_FT_PRIO("leftovers_prio-0",
126 MLX5_CORE_FS_PRIO_SHARED,
131 /* Tree creation functions */
/* Walk up the parent chain to the topmost node; that node must be a
 * namespace, and it is returned as its enclosing mlx5_flow_root_namespace
 * (double container_of: fs_base -> mlx5_flow_namespace -> root). */
133 static struct mlx5_flow_root_namespace *find_root(struct fs_base *node)
135 struct fs_base *parent;
137 /* Make sure we only read it once while we go up the tree */
138 while ((parent = node->parent))
141 if (node->type != FS_TYPE_NAMESPACE) {
/* Top node not a namespace: tree is corrupt or node was unlinked. */
142 printf("mlx5_core: WARN: ""mlx5: flow steering node %s is not in tree or garbaged\n", node->name);
146 return container_of(container_of(node,
147 struct mlx5_flow_namespace,
149 struct mlx5_flow_root_namespace,
/* Device owning the tree a node lives in; presumably NULL when the node is
 * not attached to a root — TODO confirm (the tail of this function is
 * missing from this copy). */
153 static inline struct mlx5_core_dev *fs_get_dev(struct fs_base *node)
155 struct mlx5_flow_root_namespace *root = find_root(node);
/* Initialize a node's refcounts (kref for teardown, users_refcount for
 * active users), completion, list linkage and lock. */
162 static void fs_init_node(struct fs_base *node,
163 unsigned int refcount)
165 kref_init(&node->refcount);
166 atomic_set(&node->users_refcount, refcount);
167 init_completion(&node->complete);
168 INIT_LIST_HEAD(&node->list);
169 mutex_init(&node->lock);
/* Attach an initialized node under parent: parent gains a user reference,
 * the node gets a duplicated name and its parent pointer set. */
172 static void _fs_add_node(struct fs_base *node,
174 struct fs_base *parent)
177 atomic_inc(&parent->users_refcount);
178 node->name = kstrdup_const(name, GFP_KERNEL);
179 node->parent = parent;
/* Convenience: fs_init_node + _fs_add_node in one call. */
182 static void fs_add_node(struct fs_base *node,
183 struct fs_base *parent, const char *name,
184 unsigned int refcount)
186 fs_init_node(node, refcount);
187 _fs_add_node(node, name, parent);
/* Forward declarations for the put/teardown machinery used below. */
190 static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
193 static void fs_del_dst(struct mlx5_flow_rule *dst);
194 static void _fs_del_ft(struct mlx5_flow_table *ft);
195 static void fs_del_fg(struct mlx5_flow_group *fg);
196 static void fs_del_fte(struct fs_fte *fte);
/* Dispatch hardware/firmware teardown for a node by its concrete type.
 * NOTE(review): the break statements between cases are missing from this
 * copy — presumably each case breaks; verify against the full source. */
198 static void cmd_remove_node(struct fs_base *base)
200 switch (base->type) {
201 case FS_TYPE_FLOW_DEST:
202 fs_del_dst(container_of(base, struct mlx5_flow_rule, base));
204 case FS_TYPE_FLOW_TABLE:
205 _fs_del_ft(container_of(base, struct mlx5_flow_table, base));
207 case FS_TYPE_FLOW_GROUP:
208 fs_del_fg(container_of(base, struct mlx5_flow_group, base));
210 case FS_TYPE_FLOW_ENTRY:
211 fs_del_fte(container_of(base, struct fs_fte, base));
/* kref release callback: under parent+node locks, undo the node's HW state,
 * signal waiters via the completion, then drop the reference this node held
 * on its parent.  NOTE(review): the NULL-parent guards appear to be missing
 * from this copy — confirm against the full source. */
218 static void __fs_remove_node(struct kref *kref)
220 struct fs_base *node = container_of(kref, struct fs_base, refcount);
223 mutex_lock(&node->parent->lock);
224 mutex_lock(&node->lock);
225 cmd_remove_node(node);
226 mutex_unlock(&node->lock);
227 complete(&node->complete);
229 mutex_unlock(&node->parent->lock);
230 _fs_put(node->parent, _fs_remove_node, false);
/* Release callback used for ancestors: same teardown, but also frees the
 * const-duplicated name here (no waiter will do it). */
234 void _fs_remove_node(struct kref *kref)
236 struct fs_base *node = container_of(kref, struct fs_base, refcount);
238 __fs_remove_node(kref);
239 kfree_const(node->name);
/* Take a user reference on a node. */
243 static void fs_get(struct fs_base *node)
245 atomic_inc(&node->users_refcount);
/* Drop a user reference.  parent_locked says whether the caller already
 * holds the parent's lock; otherwise it is taken here so the node can be
 * unlinked from the parent's list when the last user goes away. */
248 static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
251 struct fs_base *parent_node = node->parent;
253 if (parent_node && !parent_locked)
254 mutex_lock(&parent_node->lock);
255 if (atomic_dec_and_test(&node->users_refcount)) {
257 /*remove from parent's list*/
258 list_del_init(&node->list);
/* NOTE(review): no visible NULL check on parent_node before this
 * unlock — presumably a guard line is missing from this copy. */
259 mutex_unlock(&parent_node->lock);
261 kref_put(&node->refcount, kref_cb);
262 if (parent_node && parent_locked)
263 mutex_lock(&parent_node->lock);
264 } else if (parent_node && !parent_locked) {
265 mutex_unlock(&parent_node->lock);
/* Drop a user reference; teardown runs via __fs_remove_node on last put. */
269 static void fs_put(struct fs_base *node)
271 _fs_put(node, __fs_remove_node, false);
/* Same, for callers that already hold the parent's lock. */
274 static void fs_put_parent_locked(struct fs_base *node)
276 _fs_put(node, __fs_remove_node, true);
/* Synchronous removal: wait until teardown completed, then free the name.
 * NOTE(review): the initial put call appears to be missing from this copy. */
279 static void fs_remove_node(struct fs_base *node)
282 wait_for_completion(&node->complete);
283 kfree_const(node->name);
/* Synchronous removal variant for callers holding the parent's lock. */
287 static void fs_remove_node_parent_locked(struct fs_base *node)
289 fs_put_parent_locked(node);
290 wait_for_completion(&node->complete);
291 kfree_const(node->name);
/* Allocate and initialize a software flow-table entry (fte): copies the
 * match value, records action/flow_tag and prepares its destination list.
 * Returns ERR_PTR(-ENOMEM) on allocation failure. */
295 static struct fs_fte *fs_alloc_fte(u8 action,
303 fte = kzalloc(sizeof(*fte), GFP_KERNEL);
305 return ERR_PTR(-ENOMEM);
307 memcpy(fte->val, match_value, sizeof(fte->val));
308 fte->base.type = FS_TYPE_FLOW_ENTRY;
310 fte->flow_tag = flow_tag;
312 INIT_LIST_HEAD(&fte->dests);
313 fte->action = action;
/* Build the "star rule" fte for a flow table: a forward-to-flow-table entry
 * placed in group fg, with a single destination rule linked under it.
 * Returns ERR_PTR(-ENOSPC) when fg is full. */
318 static struct fs_fte *alloc_star_ft_entry(struct mlx5_flow_table *ft,
319 struct mlx5_flow_group *fg,
325 struct mlx5_flow_rule *dst;
327 if (fg->num_ftes == fg->max_ftes)
328 return ERR_PTR(-ENOSPC);
330 fte = fs_alloc_fte(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
331 MLX5_FS_DEFAULT_FLOW_TAG, match_value, index);
336 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
342 fte->base.parent = &fg->base;
344 dst->dest_attr.type = MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE;
345 dst->base.parent = &fte->base;
346 list_add(&dst->base.list, &fte->dests);
347 /* assumed that the callee creates the star rules sorted by index */
348 list_add_tail(&fte->base.list, &fg->ftes);
358 /* assume that fte can't be changed */
/* Unlink and free a star fte: drop every destination rule from its dests
 * list, then remove the fte from its group. */
359 static void free_star_fte_entry(struct fs_fte *fte)
361 struct mlx5_flow_group *fg;
362 struct mlx5_flow_rule *dst, *temp;
364 fs_get_parent(fg, fte);
366 list_for_each_entry_safe(dst, temp, &fte->dests, base.list) {
368 list_del(&dst->base.list);
372 list_del(&fte->base.list);
/* Allocate a software flow group from a firmware create_flow_group_in
 * blob: copies the match criteria/enable mask and derives start index and
 * capacity (end - start + 1) from the command layout.
 * Returns ERR_PTR(-ENOMEM) on allocation failure. */
377 static struct mlx5_flow_group *fs_alloc_fg(u32 *create_fg_in)
379 struct mlx5_flow_group *fg;
380 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
381 create_fg_in, match_criteria);
382 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
384 match_criteria_enable);
385 fg = kzalloc(sizeof(*fg), GFP_KERNEL);
387 return ERR_PTR(-ENOMEM);
389 INIT_LIST_HEAD(&fg->ftes);
390 fg->mask.match_criteria_enable = match_criteria_enable;
391 memcpy(&fg->mask.match_criteria, match_criteria,
392 sizeof(fg->mask.match_criteria));
393 fg->base.type = FS_TYPE_FLOW_GROUP;
394 fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
396 fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
397 end_flow_index) - fg->start_index + 1;
/* Forward declarations for chain traversal used by the star-rule code. */
401 static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio);
402 static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
403 struct fs_prio *prio);
405 /* assumed src_ft and dst_ft can't be freed */
/* Point src_ft's star rule (its miss path) at dst_ft by rewriting the fte
 * via mlx5_cmd_fs_set_fte; takes a reference on dst_ft on success.  A NULL
 * dst_ft presumably deletes the rule instead — TODO confirm (lines are
 * missing from this copy). */
406 static int fs_set_star_rule(struct mlx5_core_dev *dev,
407 struct mlx5_flow_table *src_ft,
408 struct mlx5_flow_table *dst_ft)
410 struct mlx5_flow_rule *src_dst;
412 struct fs_fte *src_fte;
414 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
416 src_dst = list_first_entry(&src_ft->star_rule.fte->dests,
417 struct mlx5_flow_rule, base.list);
418 match_value = mlx5_vzalloc(match_len);
420 mlx5_core_warn(dev, "failed to allocate inbox\n");
423 /*Create match context*/
425 fs_get_parent(src_fte, src_dst);
427 src_dst->dest_attr.ft = dst_ft;
429 err = mlx5_cmd_fs_set_fte(dev,
432 match_value, src_ft->type,
433 src_ft->id, src_fte->index,
434 src_ft->star_rule.fg->id,
442 fs_get(&dst_ft->base);
444 mlx5_cmd_fs_delete_fte(dev,
447 src_ft->type, src_ft->id,
/* Re-point the star rule of every table in prev_prio that currently misses
 * to the table being replaced so it misses to next_ft instead.  locked_prio
 * (may be NULL) tells which prio's lock the caller already holds, so the
 * matching put variant is used when releasing the old target. */
456 static int connect_prev_fts(struct fs_prio *locked_prio,
457 struct fs_prio *prev_prio,
458 struct mlx5_flow_table *next_ft)
460 struct mlx5_flow_table *iter;
462 struct mlx5_core_dev *dev = fs_get_dev(&prev_prio->base);
467 mutex_lock(&prev_prio->base.lock);
468 fs_for_each_ft(iter, prev_prio) {
469 struct mlx5_flow_rule *src_dst =
470 list_first_entry(&iter->star_rule.fte->dests,
471 struct mlx5_flow_rule, base.list);
472 struct mlx5_flow_table *prev_ft = src_dst->dest_attr.ft;
/* Already pointing at next_ft: nothing to rewire for this table. */
474 if (prev_ft == next_ft)
477 err = fs_set_star_rule(dev, iter, next_ft);
480 "mlx5: flow steering can't connect prev and next\n");
483 /* Assume ft's prio is locked */
485 struct fs_prio *prio;
487 fs_get_parent(prio, prev_ft);
/* Drop the reference the star rule held on the old target; use the
 * parent-locked put when its prio is the one the caller locked. */
488 if (prio == locked_prio)
489 fs_put_parent_locked(&prev_ft->base);
491 fs_put(&prev_ft->base);
497 mutex_unlock(&prev_prio->base.lock);
/* Create the star (miss-path) machinery for a new flow table: a one-entry
 * flow group at index ft->max_fte, its forward fte, then splice ft into the
 * prio chain — point ft's star rule at the next table and rewire the
 * previous tables to miss into ft.  Cleans up in reverse on failure. */
501 static int create_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
503 struct mlx5_flow_group *fg;
507 struct mlx5_flow_table *next_ft;
508 struct mlx5_flow_table *prev_ft;
509 struct mlx5_flow_root_namespace *root = find_root(&prio->base);
510 int fg_inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
511 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
513 fg_in = mlx5_vzalloc(fg_inlen);
515 mlx5_core_warn(root->dev, "failed to allocate inbox\n");
519 match_value = mlx5_vzalloc(match_len);
521 mlx5_core_warn(root->dev, "failed to allocate inbox\n");
/* Single-entry group occupying the reserved slot past max_fte. */
526 MLX5_SET(create_flow_group_in, fg_in, start_flow_index, ft->max_fte);
527 MLX5_SET(create_flow_group_in, fg_in, end_flow_index, ft->max_fte);
528 fg = fs_alloc_fg(fg_in);
533 ft->star_rule.fg = fg;
534 err = mlx5_cmd_fs_create_fg(fs_get_dev(&prio->base),
535 fg_in, ft->vport, ft->type,
541 ft->star_rule.fte = alloc_star_ft_entry(ft, fg,
544 if (IS_ERR(ft->star_rule.fte))
/* Chain manipulation is serialized by the root's fs_chain_lock. */
547 mutex_lock(&root->fs_chain_lock);
548 next_ft = find_next_ft(prio);
549 err = fs_set_star_rule(root->dev, ft, next_ft);
551 mutex_unlock(&root->fs_chain_lock);
555 struct fs_prio *parent;
557 fs_get_parent(parent, next_ft);
558 fs_put(&next_ft->base);
560 prev_ft = find_prev_ft(ft, prio);
562 struct fs_prio *prev_parent;
564 fs_get_parent(prev_parent, prev_ft);
566 err = connect_prev_fts(NULL, prev_parent, ft);
568 mutex_unlock(&root->fs_chain_lock);
569 goto destroy_chained_star_rule;
571 fs_put(&prev_ft->base);
573 mutex_unlock(&root->fs_chain_lock);
/* Error unwind: detach our star rule, drop next_ft's ref, free the fte
 * and destroy the firmware flow group. */
579 destroy_chained_star_rule:
580 fs_set_star_rule(fs_get_dev(&prio->base), ft, NULL);
582 fs_put(&next_ft->base);
584 free_star_fte_entry(ft->star_rule.fte);
585 mlx5_cmd_fs_destroy_fg(fs_get_dev(&ft->base), ft->vport,
/* Tear down a flow table's star machinery: first rewire prev tables to miss
 * straight to next_ft (so deletion is atomic from HW's point of view), then
 * delete our star rule, destroy its group and free the software fte. */
596 static void destroy_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
599 struct mlx5_flow_root_namespace *root;
600 struct mlx5_core_dev *dev = fs_get_dev(&prio->base);
601 struct mlx5_flow_table *prev_ft, *next_ft;
602 struct fs_prio *prev_prio;
606 root = find_root(&prio->base);
/* NOTE(review): message lacks a trailing '\n' — worth fixing upstream. */
608 printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of priority %s", prio->base.name);
610 /* In order to ensure atomic deletion, first update
611 * prev ft to point on the next ft.
613 mutex_lock(&root->fs_chain_lock);
614 prev_ft = find_prev_ft(ft, prio);
615 next_ft = find_next_ft(prio);
617 fs_get_parent(prev_prio, prev_ft);
618 /*Prev is connected to ft, only if ft is the first(last) in the prio*/
619 err = connect_prev_fts(prio, prev_prio, next_ft);
621 mlx5_core_warn(root->dev,
622 "flow steering can't connect prev and next of flow table\n");
623 fs_put(&prev_ft->base);
/* NULL target deletes our star rule from hardware. */
626 err = fs_set_star_rule(root->dev, ft, NULL);
627 /*One put is for fs_get in find next ft*/
629 fs_put(&next_ft->base);
631 fs_put(&next_ft->base);
634 mutex_unlock(&root->fs_chain_lock);
635 err = mlx5_cmd_fs_destroy_fg(dev, ft->vport, ft->type, ft->id,
636 ft->star_rule.fg->id);
639 "flow steering can't destroy star entry group(index:%d) of ft:%s\n", ft->star_rule.fg->start_index,
641 free_star_fte_entry(ft->star_rule.fte);
643 kfree(ft->star_rule.fg);
644 ft->star_rule.fg = NULL;
/* Linear search for the fs_prio with the given priority number inside ns;
 * presumably returns NULL when not found — TODO confirm (tail missing). */
647 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
650 struct fs_prio *iter_prio;
652 fs_for_each_prio(iter_prio, ns) {
653 if (iter_prio->prio == prio)
/* Mutually recursive pair computing the absolute level a new flow table
 * should get, by summing levels contributed by everything "before" its prio
 * across the namespace/prio tree. */
660 static unsigned int _alloc_new_level(struct fs_prio *prio,
661 struct mlx5_flow_namespace *match);
/* Sum level contributions of prios preceding 'prio' in ns, then recurse
 * upward through ns's parent prio. */
663 static unsigned int __alloc_new_level(struct mlx5_flow_namespace *ns,
664 struct fs_prio *prio)
666 unsigned int level = 0;
672 mutex_lock(&ns->base.lock);
673 fs_for_each_prio(p, ns) {
679 mutex_unlock(&ns->base.lock);
681 fs_get_parent(prio, ns);
683 WARN_ON(prio->base.type != FS_TYPE_PRIO);
685 return level + _alloc_new_level(prio, ns);
688 /* Called under lock of priority, hence locking all upper objects */
/* Scan prio's children newest-first: a flow table fixes the base level
 * (ft->level + 1); nested namespaces (other than 'match', the one we came
 * from) add their prios' contributions.  Recurses upward when nothing in
 * this prio settles the level. */
689 static unsigned int _alloc_new_level(struct fs_prio *prio,
690 struct mlx5_flow_namespace *match)
692 struct mlx5_flow_namespace *ns;
694 unsigned int level = 0;
699 mutex_lock(&prio->base.lock);
700 fs_for_each_ns_or_ft_reverse(it, prio) {
701 if (it->type == FS_TYPE_NAMESPACE) {
707 mutex_lock(&ns->base.lock);
708 fs_for_each_prio(p, ns)
710 mutex_unlock(&ns->base.lock);
715 struct mlx5_flow_table *ft;
718 mutex_unlock(&prio->base.lock);
719 return level + ft->level + 1;
723 fs_get_parent(ns, prio);
724 mutex_unlock(&prio->base.lock);
725 return __alloc_new_level(ns, prio) + level;
/* Public entry point: level for a table appended to 'prio'. */
728 static unsigned int alloc_new_level(struct fs_prio *prio)
730 return _alloc_new_level(prio, NULL);
/* After creating ft, make it the namespace's root table in firmware if its
 * level is lower than the current root's (lower level = hit first). */
733 static int update_root_ft_create(struct mlx5_flow_root_namespace *root,
734 struct mlx5_flow_table *ft)
737 int min_level = INT_MAX;
740 min_level = root->root_ft->level;
742 if (ft->level < min_level)
743 err = mlx5_cmd_update_root_ft(root->dev, ft->type,
749 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
/* Core flow-table creation: allocate the software object, pick its level,
 * create the firmware table (sized to a power of two with two slots
 * reserved for star rules), wire up the star rule, possibly promote it to
 * root table, name it and link it into the prio.  Errors unwind via the
 * labels at the bottom. */
757 static struct mlx5_flow_table *_create_ft_common(struct mlx5_flow_namespace *ns,
759 struct fs_prio *fs_prio,
763 struct mlx5_flow_table *ft;
768 struct mlx5_flow_root_namespace *root =
769 find_root(&ns->base);
/* NOTE(review): message lacks a trailing '\n'. */
772 printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of namespace %s", ns->base.name);
773 return ERR_PTR(-ENODEV);
776 if (fs_prio->num_ft == fs_prio->max_ft)
777 return ERR_PTR(-ENOSPC);
779 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
781 return ERR_PTR(-ENOMEM);
783 fs_init_node(&ft->base, 1);
784 INIT_LIST_HEAD(&ft->fgs);
786 /* Temporarily WA until we expose the level set in the API */
787 if (root->table_type == FS_FT_ESW_EGRESS_ACL ||
788 root->table_type == FS_FT_ESW_INGRESS_ACL)
791 ft->level = alloc_new_level(fs_prio);
793 ft->base.type = FS_TYPE_FLOW_TABLE;
795 ft->type = root->table_type;
796 /*Two entries are reserved for star rules*/
797 ft_size = roundup_pow_of_two(max_fte + 2);
798 /*User isn't aware to those rules*/
799 ft->max_fte = ft_size - 2;
800 log_table_sz = ilog2(ft_size);
801 err = mlx5_cmd_fs_create_ft(root->dev, ft->vport, ft->type,
802 ft->level, log_table_sz, &ft->id);
806 err = create_star_rule(ft, fs_prio);
/* Only NIC RX tables with modify_root capability can become the root. */
810 if ((root->table_type == FS_FT_NIC_RX) && MLX5_CAP_FLOWTABLE(root->dev,
811 flow_table_properties_nic_receive.modify_root)) {
812 err = update_root_ft_create(root, ft);
814 goto destroy_star_rule;
/* Autogenerate "flow_table_<id>" when the caller supplied no name. */
817 if (!name || !strlen(name)) {
818 snprintf(gen_name, 20, "flow_table_%u", ft->id);
819 _fs_add_node(&ft->base, gen_name, &fs_prio->base);
821 _fs_add_node(&ft->base, name, &fs_prio->base);
823 list_add_tail(&ft->base.list, &fs_prio->objs);
829 destroy_star_rule(ft, fs_prio);
831 mlx5_cmd_fs_destroy_ft(root->dev, ft->vport, ft->type, ft->id);
/* Resolve the prio number to an fs_prio, then delegate to _create_ft_common;
 * -EINVAL when the prio does not exist in ns. */
837 static struct mlx5_flow_table *create_ft_common(struct mlx5_flow_namespace *ns,
843 struct fs_prio *fs_prio = NULL;
844 fs_prio = find_prio(ns, prio);
846 return ERR_PTR(-EINVAL);
848 return _create_ft_common(ns, vport, fs_prio, max_fte, name);
/* Forward declarations for first-table search helpers. */
852 static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
853 struct list_head *start);
855 static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
856 struct list_head *start);
/* In a shared prio, reuse the existing first table (if any) and bump its
 * share count rather than creating a new table. */
858 static struct mlx5_flow_table *mlx5_create_autogrouped_shared_flow_table(struct fs_prio *fs_prio)
860 struct mlx5_flow_table *ft;
862 ft = find_first_ft_in_prio(fs_prio, &fs_prio->objs);
864 ft->shared_refcount++;
/* Public API: create a flow table whose groups are managed automatically
 * (created on demand up to max_num_groups).  For shared prios, an existing
 * table is reused under the prio's shared_lock. */
871 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
874 int num_flow_table_entries,
877 struct mlx5_flow_table *ft = NULL;
878 struct fs_prio *fs_prio;
881 fs_prio = find_prio(ns, prio);
883 return ERR_PTR(-EINVAL);
885 is_shared_prio = fs_prio->flags & MLX5_CORE_FS_PRIO_SHARED;
886 if (is_shared_prio) {
887 mutex_lock(&fs_prio->shared_lock);
888 ft = mlx5_create_autogrouped_shared_flow_table(fs_prio);
894 ft = create_ft_common(ns, 0, prio, num_flow_table_entries,
899 ft->autogroup.active = true;
900 ft->autogroup.max_types = max_num_groups;
902 ft->shared_refcount = 1;
906 mutex_unlock(&fs_prio->shared_lock);
909 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
/* Public API: create a flow table on behalf of a specific vport. */
911 struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
915 int num_flow_table_entries)
917 return create_ft_common(ns, vport, prio, num_flow_table_entries, name);
919 EXPORT_SYMBOL(mlx5_create_vport_flow_table);
/* Public API: create a flow table on the local vport (vport 0). */
921 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
924 int num_flow_table_entries)
926 return create_ft_common(ns, 0, prio, num_flow_table_entries, name);
928 EXPORT_SYMBOL(mlx5_create_flow_table);
/* Teardown callback for a flow-table node: destroy the firmware table and
 * presumably decrement the parent prio's table count — TODO confirm (tail
 * of this function is missing from this copy). */
930 static void _fs_del_ft(struct mlx5_flow_table *ft)
933 struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
934 struct fs_prio *prio;
936 err = mlx5_cmd_fs_destroy_ft(dev, ft->vport, ft->type, ft->id);
938 mlx5_core_warn(dev, "flow steering can't destroy ft %s\n",
941 fs_get_parent(prio, ft);
/* If ft is the namespace's current root table, promote its successor (next
 * table in the prio, else the first table of a following prio) to root in
 * firmware before ft goes away.  put_ft releases the reference find_next_ft
 * took. */
945 static int update_root_ft_destroy(struct mlx5_flow_root_namespace *root,
946 struct mlx5_flow_table *ft)
949 struct fs_prio *prio;
950 struct mlx5_flow_table *next_ft = NULL;
951 struct mlx5_flow_table *put_ft = NULL;
953 if (root->root_ft != ft)
956 fs_get_parent(prio, ft);
957 /*Assuming objs containis only flow tables and
958 * flow tables are sorted by level.
960 if (!list_is_last(&ft->base.list, &prio->objs)) {
961 next_ft = list_next_entry(ft, base.list);
963 next_ft = find_next_ft(prio);
968 err = mlx5_cmd_update_root_ft(root->dev, next_ft->type,
971 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
975 root->root_ft = next_ft;
978 fs_put(&put_ft->base);
983 /*Objects in the same prio are destroyed in the reverse order they were createrd*/
/* Public API: destroy a flow table.  Shared tables only go away when the
 * last sharer calls in; otherwise the root pointer is moved off ft, its
 * star machinery torn down, and the node removed under the prio lock. */
984 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
987 struct fs_prio *prio;
988 struct mlx5_flow_root_namespace *root;
991 fs_get_parent(prio, ft);
992 root = find_root(&prio->base);
/* NOTE(review): message lacks a trailing '\n'. */
995 printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of priority %s", prio->base.name);
999 is_shared_prio = prio->flags & MLX5_CORE_FS_PRIO_SHARED;
1000 if (is_shared_prio) {
1001 mutex_lock(&prio->shared_lock);
1002 if (ft->shared_refcount > 1) {
1003 --ft->shared_refcount;
1005 mutex_unlock(&prio->shared_lock);
1010 mutex_lock(&prio->base.lock);
1011 mutex_lock(&ft->base.lock);
1013 err = update_root_ft_destroy(root, ft);
1017 /* delete two last entries */
1018 destroy_star_rule(ft, prio);
1020 mutex_unlock(&ft->base.lock);
1021 fs_remove_node_parent_locked(&ft->base);
1022 mutex_unlock(&prio->base.lock);
1024 mutex_unlock(&prio->shared_lock);
/* Error path: release locks in reverse acquisition order. */
1029 mutex_unlock(&ft->base.lock);
1030 mutex_unlock(&prio->base.lock);
1032 mutex_unlock(&prio->shared_lock);
1036 EXPORT_SYMBOL(mlx5_destroy_flow_table);
/* Create a flow group: allocate the software object from the firmware blob,
 * create it in firmware, then (under the table lock) bump the autogroup
 * type count, name it "group_<id>" and link it after 'prev'. */
1038 static struct mlx5_flow_group *fs_create_fg(struct mlx5_core_dev *dev,
1039 struct mlx5_flow_table *ft,
1040 struct list_head *prev,
1044 struct mlx5_flow_group *fg;
1046 unsigned int end_index;
1049 fg = fs_alloc_fg(fg_in);
1053 end_index = fg->start_index + fg->max_ftes - 1;
1054 err = mlx5_cmd_fs_create_fg(dev, fg_in,
1055 ft->vport, ft->type, ft->id,
1060 mutex_lock(&ft->base.lock);
1061 if (ft->autogroup.active)
1062 ft->autogroup.num_types++;
1064 snprintf(name, sizeof(name), "group_%u", fg->id);
1065 /*Add node to tree*/
1066 fs_add_node(&fg->base, &ft->base, name, refcount);
1067 /*Add node to group list*/
1068 list_add(&fg->base.list, prev);
1069 mutex_unlock(&ft->base.lock);
1075 return ERR_PTR(err);
/* Public API: create a flow group at the tail of ft's group list.
 * Rejected with -EPERM on autogrouped tables (groups are managed there). */
1078 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1081 struct mlx5_flow_group *fg;
1082 struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
1085 return ERR_PTR(-ENODEV);
1087 if (ft->autogroup.active)
1088 return ERR_PTR(-EPERM);
1090 fg = fs_create_fg(dev, ft, ft->fgs.prev, in, 1);
1094 EXPORT_SYMBOL(mlx5_create_flow_group);
1096 /*Group is destoyed when all the rules in the group were removed*/
/* Teardown callback for a flow group: decrement the table's autogroup type
 * count and destroy the group in firmware (warn-only on failure). */
1097 static void fs_del_fg(struct mlx5_flow_group *fg)
1099 struct mlx5_flow_table *parent_ft;
1100 struct mlx5_core_dev *dev;
1102 fs_get_parent(parent_ft, fg);
1103 dev = fs_get_dev(&parent_ft->base);
1106 if (parent_ft->autogroup.active)
1107 parent_ft->autogroup.num_types--;
1109 if (mlx5_cmd_fs_destroy_fg(dev, parent_ft->vport,
1111 parent_ft->id, fg->id))
1112 mlx5_core_warn(dev, "flow steering can't destroy fg\n");
/* Public API: synchronous group destruction via the node machinery. */
1115 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
1117 fs_remove_node(&fg->base);
1119 EXPORT_SYMBOL(mlx5_destroy_flow_group);
/* Compare two match values byte-by-byte under a mask: returns true iff
 * (val1 & mask) == (val2 & mask) over 'size' bytes.  An all-zero mask (or
 * size == 0) therefore always matches.
 *
 * Rewritten to index through uint8_t pointers with a size_t counter: the
 * previous form incremented the void * parameters directly, which is a GNU
 * extension and not valid ISO C.
 * TODO: optimize by comparing 64 bits at a time when alignment permits. */
static bool _fs_match_exact_val(void *mask, void *val1, void *val2, size_t size)
{
	const uint8_t *m = mask;
	const uint8_t *v1 = val1;
	const uint8_t *v2 = val2;
	size_t i;

	for (i = 0; i < size; i++) {
		if ((v1[i] & m[i]) != (v2[i] & m[i]))
			return false;
	}

	return true;
}
/* Check whether two fte_match_param values are equal under a group mask:
 * for each enabled criteria section (outer headers, misc parameters, inner
 * headers) compare the corresponding sub-structures masked by the group's
 * match criteria. */
1134 bool fs_match_exact_val(struct mlx5_core_fs_mask *mask,
1135 void *val1, void *val2)
1137 if (mask->match_criteria_enable &
1138 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
1139 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
1140 val1, outer_headers);
1141 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
1142 val2, outer_headers);
1143 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
1144 mask->match_criteria, outer_headers);
1146 if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
1147 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
1151 if (mask->match_criteria_enable &
1152 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
1153 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
1154 val1, misc_parameters);
1155 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
1156 val2, misc_parameters);
1157 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
1158 mask->match_criteria, misc_parameters);
1160 if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
1161 MLX5_ST_SZ_BYTES(fte_match_set_misc)))
1164 if (mask->match_criteria_enable &
1165 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
1166 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
1167 val1, inner_headers);
1168 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
1169 val2, inner_headers);
1170 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
1171 mask->match_criteria, inner_headers);
1173 if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
1174 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
/* Two group masks are identical iff their criteria-enable bits match and the
 * full fte_match_param criteria bytes are equal. */
1180 bool fs_match_exact_mask(u8 match_criteria_enable1,
1181 u8 match_criteria_enable2,
1182 void *mask1, void *mask2)
1184 return match_criteria_enable1 == match_criteria_enable2 &&
1185 !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
1188 static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
1189 struct list_head *start);
/* Scan prio's children backwards starting after 'start' and return the
 * first flow table found (directly or inside a nested namespace).
 * Presumably returns a referenced ft or NULL — TODO confirm (lines are
 * missing from this copy). */
1191 static struct mlx5_flow_table *_find_first_ft_in_prio_reverse(struct fs_prio *prio,
1192 struct list_head *start)
1194 struct fs_base *it = container_of(start, struct fs_base, list);
1199 fs_for_each_ns_or_ft_continue_reverse(it, prio) {
1200 struct mlx5_flow_namespace *ns;
1201 struct mlx5_flow_table *ft;
1203 if (it->type == FS_TYPE_FLOW_TABLE) {
1210 WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);
1212 ft = find_first_ft_in_ns_reverse(ns, &ns->prios);
/* Locked wrapper around _find_first_ft_in_prio_reverse. */
1220 static struct mlx5_flow_table *find_first_ft_in_prio_reverse(struct fs_prio *prio,
1221 struct list_head *start)
1223 struct mlx5_flow_table *ft;
1228 mutex_lock(&prio->base.lock);
1229 ft = _find_first_ft_in_prio_reverse(prio, start);
1230 mutex_unlock(&prio->base.lock);
/* Walk ns's prios backwards from 'start' and return the first flow table
 * found in any of them; the ns lock covers the whole scan. */
1235 static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
1236 struct list_head *start)
1238 struct fs_prio *prio;
1243 fs_get_obj(prio, container_of(start, struct fs_base, list));
1244 mutex_lock(&ns->base.lock);
1245 fs_for_each_prio_continue_reverse(prio, ns) {
1246 struct mlx5_flow_table *ft;
1248 ft = find_first_ft_in_prio_reverse(prio, &prio->objs);
1250 mutex_unlock(&ns->base.lock);
1254 mutex_unlock(&ns->base.lock);
1259 /* Returned a held ft, assumed curr is protected, assumed curr's parent is
/* Find the flow table that precedes 'curr' in the global chain: if curr is
 * not the first table in its prio the answer lies earlier in the same prio;
 * otherwise climb the namespace tree scanning backwards. */
1262 static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
1263 struct fs_prio *prio)
1265 struct mlx5_flow_table *ft = NULL;
1266 struct fs_base *curr_base;
1271 /* prio has either namespace or flow-tables, but not both */
1272 if (!list_empty(&prio->objs) &&
1273 list_first_entry(&prio->objs, struct mlx5_flow_table, base.list) !=
1277 while (!ft && prio) {
1278 struct mlx5_flow_namespace *ns;
1280 fs_get_parent(ns, prio);
1281 ft = find_first_ft_in_ns_reverse(ns, &prio->base.list);
1282 curr_base = &ns->base;
1283 fs_get_parent(prio, ns);
1286 ft = find_first_ft_in_prio_reverse(prio,
/* Forward-direction twin of the reverse scanners: first flow table after
 * 'start' among prio's children (directly or in a nested namespace). */
1292 static struct mlx5_flow_table *_find_first_ft_in_prio(struct fs_prio *prio,
1293 struct list_head *start)
1295 struct fs_base *it = container_of(start, struct fs_base, list);
1300 fs_for_each_ns_or_ft_continue(it, prio) {
1301 struct mlx5_flow_namespace *ns;
1302 struct mlx5_flow_table *ft;
1304 if (it->type == FS_TYPE_FLOW_TABLE) {
1311 WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);
1313 ft = find_first_ft_in_ns(ns, &ns->prios);
/* Locked wrapper around _find_first_ft_in_prio. */
1321 static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
1322 struct list_head *start)
1324 struct mlx5_flow_table *ft;
1329 mutex_lock(&prio->base.lock);
1330 ft = _find_first_ft_in_prio(prio, start);
1331 mutex_unlock(&prio->base.lock);
/* First flow table in any prio of ns at or after 'start'. */
1336 static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
1337 struct list_head *start)
1339 struct fs_prio *prio;
1344 fs_get_obj(prio, container_of(start, struct fs_base, list));
1345 mutex_lock(&ns->base.lock);
1346 fs_for_each_prio_continue(prio, ns) {
1347 struct mlx5_flow_table *ft;
1349 ft = find_first_ft_in_prio(prio, &prio->objs);
1351 mutex_unlock(&ns->base.lock);
1355 mutex_unlock(&ns->base.lock);
1360 /* returned a held ft, assumed curr is protected, assumed curr's parent is
/* Find the flow table that follows 'prio' in the global chain, climbing the
 * namespace tree forward until a table is found or the root is passed. */
1363 static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio)
1365 struct mlx5_flow_table *ft = NULL;
1366 struct fs_base *curr_base;
1368 while (!ft && prio) {
1369 struct mlx5_flow_namespace *ns;
1371 fs_get_parent(ns, prio);
1372 ft = find_first_ft_in_ns(ns, &prio->base.list);
1373 curr_base = &ns->base;
1374 fs_get_parent(prio, ns);
1377 ft = _find_first_ft_in_prio(prio, &curr_base->list);
1383 /* called under ft mutex lock */
/* For autogrouped tables: find (or make room for) a flow group matching the
 * given criteria.  Sizes new groups as an even split of max_fte over
 * max_types+1, finds the first index gap large enough (groups are kept
 * sorted by start_index), and creates the group there. */
1384 static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
1385 u8 match_criteria_enable,
1386 u32 *match_criteria)
1388 unsigned int group_size;
1389 unsigned int candidate_index = 0;
1390 unsigned int candidate_group_num = 0;
1391 struct mlx5_flow_group *g;
1392 struct mlx5_flow_group *ret;
1393 struct list_head *prev = &ft->fgs;
1394 struct mlx5_core_dev *dev;
1396 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1397 void *match_criteria_addr;
1399 if (!ft->autogroup.active)
1400 return ERR_PTR(-ENOENT);
1402 dev = fs_get_dev(&ft->base);
1404 return ERR_PTR(-ENODEV);
1406 in = mlx5_vzalloc(inlen);
1408 mlx5_core_warn(dev, "failed to allocate inbox\n");
1409 return ERR_PTR(-ENOMEM);
/* While under the type budget, split the table evenly between groups. */
1413 if (ft->autogroup.num_types < ft->autogroup.max_types)
1414 group_size = ft->max_fte / (ft->autogroup.max_types + 1);
1418 if (group_size == 0) {
1420 "flow steering can't create group size of 0\n");
1421 ret = ERR_PTR(-EINVAL);
1425 /* sorted by start_index */
1426 fs_for_each_fg(g, ft) {
1427 candidate_group_num++;
/* Candidate window collides with g: move past g. */
1428 if (candidate_index + group_size > g->start_index)
1429 candidate_index = g->start_index + g->max_ftes;
1432 prev = &g->base.list;
1435 if (candidate_index + group_size > ft->max_fte) {
1436 ret = ERR_PTR(-ENOSPC);
1440 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1441 match_criteria_enable);
1442 MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
1443 MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
1445 match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1446 in, match_criteria);
1447 memcpy(match_criteria_addr, match_criteria,
1448 MLX5_ST_SZ_BYTES(fte_match_param));
1450 ret = fs_create_fg(dev, ft, prev, in, 0);
/* Climb from 'node' to the nearest ancestor namespace that has registered
 * notifier handlers; NULL result presumably possible when none exists —
 * TODO confirm (tail missing from this copy). */
1456 static struct mlx5_flow_namespace *get_ns_with_notifiers(struct fs_base *node)
1458 struct mlx5_flow_namespace *ns = NULL;
1460 while (node && (node->type != FS_TYPE_NAMESPACE ||
1461 list_empty(&container_of(node, struct
1462 mlx5_flow_namespace,
1463 base)->list_notifiers)))
1464 node = node->parent;
1467 fs_get_obj(ns, node);
1473 /*Assumption- fte is locked*/
/* Invoke every registered add_dst callback on the nearest notifier-bearing
 * namespace for a newly added rule, passing each handler its per-rule
 * client data (looked up under the rule's clients_lock). */
1474 static void call_to_add_rule_notifiers(struct mlx5_flow_rule *dst,
1477 struct mlx5_flow_namespace *ns;
1478 struct mlx5_flow_handler *iter_handler;
1479 struct fs_client_priv_data *iter_client;
1481 bool is_new_rule = list_first_entry(&fte->dests,
1482 struct mlx5_flow_rule,
1486 ns = get_ns_with_notifiers(&fte->base);
1490 down_read(&ns->notifiers_rw_sem);
1491 list_for_each_entry(iter_handler, &ns->list_notifiers,
1493 if (iter_handler->add_dst_cb) {
1495 mutex_lock(&dst->clients_lock);
1496 list_for_each_entry(
1497 iter_client, &dst->clients_data, list) {
1498 if (iter_client->fs_handler == iter_handler) {
1499 data = iter_client->client_dst_data;
1503 mutex_unlock(&dst->clients_lock);
1504 err = iter_handler->add_dst_cb(dst,
1507 iter_handler->client_context);
1512 up_read(&ns->notifiers_rw_sem);
/*
 * Notify all del_dst_cb handlers on the owning namespace that rule @dst is
 * being removed from @fte.  ctx_changed tells handlers whether the fte now
 * has no destinations left (dests_size == 0).  Per-handler private data is
 * looked up under dst->clients_lock, same as the add path.
 */
1515 static void call_to_del_rule_notifiers(struct mlx5_flow_rule *dst,
1518 struct mlx5_flow_namespace *ns;
1519 struct mlx5_flow_handler *iter_handler;
1520 struct fs_client_priv_data *iter_client;
1522 bool ctx_changed = (fte->dests_size == 0);
1524 ns = get_ns_with_notifiers(&fte->base);
1527 down_read(&ns->notifiers_rw_sem);
1528 list_for_each_entry(iter_handler, &ns->list_notifiers,
1531 mutex_lock(&dst->clients_lock);
1532 list_for_each_entry(iter_client, &dst->clients_data, list) {
1533 if (iter_client->fs_handler == iter_handler) {
1534 data = iter_client->client_dst_data;
1538 mutex_unlock(&dst->clients_lock);
1539 if (iter_handler->del_dst_cb) {
1540 iter_handler->del_dst_cb(dst, ctx_changed, data,
1541 iter_handler->client_context);
1544 up_read(&ns->notifiers_rw_sem);
1547 /* fte should not be deleted while calling this function */
/*
 * Allocate a new rule (destination) object, copy @dest into it, link it to
 * the tail of fte->dests and push the updated fte to firmware via
 * mlx5_cmd_fs_set_fte.  On firmware failure the rule is unlinked again and
 * an ERR_PTR is returned.
 * NOTE(review): two list_del error-path lines are visible (1577, 1582) —
 * the surrounding branches were dropped by the extraction; confirm the
 * exact unwind sequence against the full source.
 */
1548 static struct mlx5_flow_rule *_fs_add_dst_fte(struct fs_fte *fte,
1549 struct mlx5_flow_group *fg,
1550 struct mlx5_flow_destination *dest)
1552 struct mlx5_flow_table *ft;
1553 struct mlx5_flow_rule *dst;
1556 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1558 return ERR_PTR(-ENOMEM);
1560 memcpy(&dst->dest_attr, dest, sizeof(*dest));
1561 dst->base.type = FS_TYPE_FLOW_DEST;
1562 INIT_LIST_HEAD(&dst->clients_data);
1563 mutex_init(&dst->clients_lock);
1564 fs_get_parent(ft, fg);
1565 /*Add dest to dests list- added as first element after the head*/
1566 list_add_tail(&dst->base.list, &fte->dests);
/* Program the fte (including the new destination) into firmware. */
1568 err = mlx5_cmd_fs_set_fte(fs_get_dev(&ft->base),
1572 ft->id, fte->index, fg->id, fte->flow_tag,
1573 fte->action, fte->dests_size, &fte->dests);
1577 list_del(&dst->base.list);
1582 list_del(&dst->base.list);
1585 return ERR_PTR(err);
/*
 * Build a heap-allocated, human-readable name for @dest of the form
 * "dest_<type>_<id>" (used as the tree-node name of a rule).
 * Caller owns the returned buffer.
 * NOTE(review): the kzalloc result is not visibly NULL-checked before
 * snprintf in this extraction — verify the full source handles allocation
 * failure.
 */
1588 static char *get_dest_name(struct mlx5_flow_destination *dest)
1590 char *name = kzalloc(sizeof(char) * 20, GFP_KERNEL);
1592 switch (dest->type) {
1593 case MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE:
1594 snprintf(name, 20, "dest_%s_%u", "flow_table",
1597 case MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT:
1598 snprintf(name, 20, "dest_%s_%u", "vport",
1601 case MLX5_FLOW_CONTEXT_DEST_TYPE_TIR:
1602 snprintf(name, 20, "dest_%s_%u", "tir", dest->tir_num);
1609 /* assumed fg is locked */
/*
 * Scan the group's fte list (sorted by index) starting at fg->start_index
 * and return the first unused flow-table-entry index; *prev is set to the
 * list position after which a new fte should be inserted.
 */
1610 static unsigned int fs_get_free_fg_index(struct mlx5_flow_group *fg,
1611 struct list_head **prev)
1614 unsigned int start = fg->start_index;
1619 /* assumed list is sorted by index */
1620 fs_for_each_fte(fte, fg) {
/* First gap in the sorted index sequence is the free slot. */
1621 if (fte->index != start)
1625 *prev = &fte->base.list;
/*
 * Allocate a new fte in @fg at the first free index (found via
 * fs_get_free_fg_index); @prev receives the list position for insertion.
 */
1632 static struct fs_fte *fs_create_fte(struct mlx5_flow_group *fg,
1636 struct list_head **prev)
1641 index = fs_get_free_fg_index(fg, prev);
1642 fte = fs_alloc_fte(action, flow_tag, match_value, index);
/*
 * Attach @rule to the object tree under @fte (named after its destination),
 * re-link it into fte->dests, and fire the add-rule notifiers.
 */
1649 static void add_rule_to_tree(struct mlx5_flow_rule *rule,
1654 dest_name = get_dest_name(&rule->dest_attr);
1655 fs_add_node(&rule->base, &fte->base, dest_name, 1);
1656 /* re-add to list, since fs_add_node reset our list */
1657 list_add_tail(&rule->base.list, &fte->dests);
1659 call_to_add_rule_notifiers(rule, fte);
/*
 * Remove destination @dst from its fte.  Under the parent group's lock the
 * dst is unlinked; if the fte still has destinations the shrunken fte is
 * re-programmed to firmware via mlx5_cmd_fs_set_fte, otherwise (per the
 * visible structure) no firmware update is issued here.  Finally the
 * del-rule notifiers run and the scratch match buffer is freed.
 */
1662 static void fs_del_dst(struct mlx5_flow_rule *dst)
1664 struct mlx5_flow_table *ft;
1665 struct mlx5_flow_group *fg;
1668 struct mlx5_core_dev *dev = fs_get_dev(&dst->base);
1669 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
1674 match_value = mlx5_vzalloc(match_len);
1676 mlx5_core_warn(dev, "failed to allocate inbox\n");
1680 fs_get_parent(fte, dst);
1681 fs_get_parent(fg, fte);
1682 mutex_lock(&fg->base.lock);
1683 memcpy(match_value, fte->val, sizeof(fte->val));
1684 /* ft can't be changed as fg is locked */
1685 fs_get_parent(ft, fg);
1686 list_del(&dst->base.list);
1688 if (fte->dests_size) {
1689 err = mlx5_cmd_fs_set_fte(dev, ft->vport,
1690 &fte->status, match_value, ft->type,
1691 ft->id, fte->index, fg->id,
1692 fte->flow_tag, fte->action,
1693 fte->dests_size, &fte->dests);
1695 mlx5_core_warn(dev, "%s can't delete dst %s\n",
1696 __func__, dst->base.name);
1700 call_to_del_rule_notifiers(dst, fte);
1702 mutex_unlock(&fg->base.lock);
1703 kvfree(match_value);
/*
 * Delete flow-table entry @fte from firmware (mlx5_cmd_fs_delete_fte);
 * failures are logged but otherwise non-fatal.
 */
1706 static void fs_del_fte(struct fs_fte *fte)
1708 struct mlx5_flow_table *ft;
1709 struct mlx5_flow_group *fg;
1711 struct mlx5_core_dev *dev;
1713 fs_get_parent(fg, fte);
1714 fs_get_parent(ft, fg);
1716 dev = fs_get_dev(&ft->base);
1719 err = mlx5_cmd_fs_delete_fte(dev, ft->vport, &fte->status,
1720 ft->type, ft->id, fte->index);
1722 mlx5_core_warn(dev, "flow steering can't delete fte %s\n",
1728 /* assuming parent fg is locked */
1729 /* Add dst algorithm */
/*
 * Add a destination within group @fg.  First try to find an existing fte
 * that matches exactly (value, action, flow_tag) and append the dst there;
 * otherwise, if the group is not full, create a fresh fte at the next free
 * index, add it to the tree, and attach the dst via add_rule_to_tree.
 * Returns ERR_PTR(-ENOSPC) when fg has no room (caller may autogroup).
 */
1730 static struct mlx5_flow_rule *fs_add_dst_fg(struct mlx5_flow_group *fg,
1734 struct mlx5_flow_destination *dest)
1737 struct mlx5_flow_rule *dst;
1738 struct mlx5_flow_table *ft;
1739 struct list_head *prev;
1742 mutex_lock(&fg->base.lock);
1743 fs_for_each_fte(fte, fg) {
1744 /* TODO: Check of size against PRM max size */
1745 mutex_lock(&fte->base.lock);
1746 if (fs_match_exact_val(&fg->mask, match_value, &fte->val) &&
1747 action == fte->action && flow_tag == fte->flow_tag) {
1748 dst = _fs_add_dst_fte(fte, fg, dest);
1749 mutex_unlock(&fte->base.lock);
1754 mutex_unlock(&fte->base.lock);
1757 fs_get_parent(ft, fg);
1758 if (fg->num_ftes == fg->max_ftes) {
1759 dst = ERR_PTR(-ENOSPC);
1763 fte = fs_create_fte(fg, match_value, action, flow_tag, &prev);
1768 dst = _fs_add_dst_fte(fte, fg, dest);
1776 snprintf(fte_name, sizeof(fte_name), "fte%u", fte->index);
1777 /* Add node to tree */
1778 fs_add_node(&fte->base, &fg->base, fte_name, 0);
1779 list_add(&fte->base.list, prev);
1781 add_rule_to_tree(dst, fte);
1783 mutex_unlock(&fg->base.lock);
/*
 * Add a destination at flow-table scope.  Under ft->base.lock, look for a
 * group whose mask matches (match_criteria_enable + match_criteria) and try
 * fs_add_dst_fg there; -ENOSPC falls through to the next group.  If no
 * group fits, create an autogroup and retry; on failure the (refcount 0)
 * autogroup is removed again.
 */
1787 static struct mlx5_flow_rule *fs_add_dst_ft(struct mlx5_flow_table *ft,
1788 u8 match_criteria_enable,
1789 u32 *match_criteria,
1791 u8 action, u32 flow_tag,
1792 struct mlx5_flow_destination *dest)
1794 /*? where dst_entry is allocated*/
1795 struct mlx5_flow_group *g;
1796 struct mlx5_flow_rule *dst;
1799 mutex_lock(&ft->base.lock);
1800 fs_for_each_fg(g, ft)
1801 if (fs_match_exact_mask(g->mask.match_criteria_enable,
1802 match_criteria_enable,
1803 g->mask.match_criteria,
1805 mutex_unlock(&ft->base.lock);
1807 dst = fs_add_dst_fg(g, match_value,
1808 action, flow_tag, dest);
/* Any error except -ENOSPC is final; -ENOSPC tries the next group. */
1809 if (PTR_ERR(dst) && PTR_ERR(dst) != -ENOSPC)
1812 mutex_unlock(&ft->base.lock);
1814 g = create_autogroup(ft, match_criteria_enable, match_criteria);
1820 dst = fs_add_dst_fg(g, match_value,
1821 action, flow_tag, dest);
1823 /* Remove assumes refcount > 0 and autogroup creates a group
1824 * with a refcount = 0.
1827 fs_remove_node(&g->base);
/*
 * Public entry point: add a flow rule to @ft.  Takes the namespace
 * dests_rw_sem for read so rule addition can run concurrently with other
 * adds but is excluded by rule iteration (which takes it for write), then
 * delegates to fs_add_dst_ft.
 */
1836 struct mlx5_flow_rule *
1837 mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1838 u8 match_criteria_enable,
1839 u32 *match_criteria,
1843 struct mlx5_flow_destination *dest)
1845 struct mlx5_flow_rule *dst;
1846 struct mlx5_flow_namespace *ns;
1848 ns = get_ns_with_notifiers(&ft->base);
1850 down_read(&ns->dests_rw_sem);
1851 dst = fs_add_dst_ft(ft, match_criteria_enable, match_criteria,
1852 match_value, action, flow_tag, dest);
1854 up_read(&ns->dests_rw_sem);
1860 EXPORT_SYMBOL(mlx5_add_flow_rule);
/*
 * Public entry point: delete a flow rule.  Removal is done by dropping the
 * rule's tree node (fs_remove_node) under the namespace dests_rw_sem read
 * lock; the tree teardown path performs the actual firmware cleanup.
 */
1862 void mlx5_del_flow_rule(struct mlx5_flow_rule *dst)
1864 struct mlx5_flow_namespace *ns;
1866 ns = get_ns_with_notifiers(&dst->base);
1868 down_read(&ns->dests_rw_sem);
1869 fs_remove_node(&dst->base);
1871 up_read(&ns->dests_rw_sem);
1873 EXPORT_SYMBOL(mlx5_del_flow_rule);
/* Names for the per-table-type root namespaces and default prio sizing. */
1875 #define MLX5_CORE_FS_ROOT_NS_NAME "root"
1876 #define MLX5_CORE_FS_ESW_EGRESS_ACL "esw_egress_root"
1877 #define MLX5_CORE_FS_ESW_INGRESS_ACL "esw_ingress_root"
1878 #define MLX5_CORE_FS_FDB_ROOT_NS_NAME "fdb_root"
1879 #define MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME "sniffer_rx_root"
1880 #define MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME "sniffer_tx_root"
1881 #define MLX5_CORE_FS_PRIO_MAX_FT 4
1882 #define MLX5_CORE_FS_PRIO_MAX_NS 1
/*
 * Allocate and initialize a priority node under namespace @ns: adds it to
 * the object tree (refcounted), records max_ft/max_ns/prio/flags, and links
 * it onto ns->prios.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
1884 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
1885 unsigned prio, int max_ft,
1886 const char *name, u8 flags)
1888 struct fs_prio *fs_prio;
1890 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
1892 return ERR_PTR(-ENOMEM);
1894 fs_prio->base.type = FS_TYPE_PRIO;
1895 fs_add_node(&fs_prio->base, &ns->base, name, 1);
1896 fs_prio->max_ft = max_ft;
1897 fs_prio->max_ns = MLX5_CORE_FS_PRIO_MAX_NS;
1898 fs_prio->prio = prio;
1899 fs_prio->flags = flags;
1900 list_add_tail(&fs_prio->base.list, &ns->prios);
1901 INIT_LIST_HEAD(&fs_prio->objs);
1902 mutex_init(&fs_prio->shared_lock);
/*
 * Tear down dev->root_ns bottom-up: first remove sub-prios inside nested
 * namespaces, then the namespaces inside each top-level prio, then the
 * top-level prios themselves, and finally the root namespace node.
 */
1907 static void cleanup_root_ns(struct mlx5_core_dev *dev)
1909 struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
1910 struct fs_prio *iter_prio;
/* Pass 1: drop prios nested under second-level namespaces. */
1916 fs_for_each_prio(iter_prio, &root_ns->ns) {
1917 struct mlx5_flow_namespace *iter_ns;
1919 fs_for_each_ns(iter_ns, iter_prio) {
1920 while (!list_empty(&iter_ns->prios)) {
1921 struct fs_base *iter_prio2 =
1922 list_first_entry(&iter_ns->prios,
1926 fs_remove_node(iter_prio2);
/* Pass 2: drop the namespaces held directly by each top-level prio. */
1932 fs_for_each_prio(iter_prio, &root_ns->ns) {
1933 while (!list_empty(&iter_prio->objs)) {
1934 struct fs_base *iter_ns =
1935 list_first_entry(&iter_prio->objs,
1939 fs_remove_node(iter_ns);
/* Pass 3: drop the top-level prios, then the root namespace itself. */
1943 while (!list_empty(&root_ns->ns.prios)) {
1944 struct fs_base *iter_prio =
1945 list_first_entry(&root_ns->ns.prios,
1949 fs_remove_node(iter_prio);
1952 fs_remove_node(&root_ns->ns.base);
1953 dev->root_ns = NULL;
/*
 * Tear down a root namespace that holds at most one prio (fdb, ACL and
 * sniffer roots): remove the prio if present, then the namespace node.
 */
1956 static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
1957 struct mlx5_flow_root_namespace *root_ns)
1959 struct fs_base *prio;
1964 if (!list_empty(&root_ns->ns.prios)) {
1965 prio = list_first_entry(&root_ns->ns.prios,
1968 fs_remove_node(prio);
1970 fs_remove_node(&root_ns->ns.base);
/*
 * Release all flow-steering namespaces of @dev (reverse of mlx5_init_fs).
 */
1974 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
1976 cleanup_root_ns(dev);
1977 cleanup_single_prio_root_ns(dev, dev->sniffer_rx_root_ns);
1978 cleanup_single_prio_root_ns(dev, dev->sniffer_tx_root_ns);
1979 cleanup_single_prio_root_ns(dev, dev->fdb_root_ns);
1980 cleanup_single_prio_root_ns(dev, dev->esw_egress_root_ns);
1981 cleanup_single_prio_root_ns(dev, dev->esw_ingress_root_ns);
/*
 * Initialize the common fields of a namespace object: type tag, the two
 * rw-semaphores (destinations and notifiers) and the empty lists.
 */
1984 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
1987 ns->base.type = FS_TYPE_NAMESPACE;
1988 init_rwsem(&ns->dests_rw_sem);
1989 init_rwsem(&ns->notifiers_rw_sem);
1990 INIT_LIST_HEAD(&ns->prios);
1991 INIT_LIST_HEAD(&ns->list_notifiers);
/*
 * Allocate a root namespace for @table_type, initialize its embedded
 * namespace, and register it in the object tree under the given @name
 * (parent NULL ⇒ tree root).
 */
1996 static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
2001 struct mlx5_flow_root_namespace *root_ns;
2002 struct mlx5_flow_namespace *ns;
2004 /* create the root namespace */
2005 root_ns = mlx5_vzalloc(sizeof(*root_ns));
2010 root_ns->table_type = table_type;
2011 mutex_init(&root_ns->fs_chain_lock);
2014 fs_init_namespace(ns);
2015 fs_add_node(&ns->base, NULL, name, 1);
/*
 * Create the FDB root namespace with a single prio ("fdb_prio", max 1 FT).
 * Returns PTR_ERR on prio creation failure.
 */
2022 static int init_fdb_root_ns(struct mlx5_core_dev *dev)
2024 struct fs_prio *prio;
2026 dev->fdb_root_ns = create_root_ns(dev, FS_FT_FDB,
2027 MLX5_CORE_FS_FDB_ROOT_NS_NAME);
2028 if (!dev->fdb_root_ns)
2032 prio = fs_create_prio(&dev->fdb_root_ns->ns, 0, 1, "fdb_prio", 0);
2034 return PTR_ERR(prio);
/* One flow table per vport in the ACL prios. */
2039 #define MAX_VPORTS 128
/*
 * Create the e-switch egress ACL root namespace with a single prio sized
 * for MAX_VPORTS flow tables.
 */
2041 static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
2043 struct fs_prio *prio;
2045 dev->esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL,
2046 MLX5_CORE_FS_ESW_EGRESS_ACL);
2047 if (!dev->esw_egress_root_ns)
2051 prio = fs_create_prio(&dev->esw_egress_root_ns->ns, 0, MAX_VPORTS,
2052 "esw_egress_prio", 0);
2054 return PTR_ERR(prio);
/*
 * Create the e-switch ingress ACL root namespace with a single prio sized
 * for MAX_VPORTS flow tables (mirror of the egress variant).
 */
2059 static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
2061 struct fs_prio *prio;
2063 dev->esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL,
2064 MLX5_CORE_FS_ESW_INGRESS_ACL);
2065 if (!dev->esw_ingress_root_ns)
2069 prio = fs_create_prio(&dev->esw_ingress_root_ns->ns, 0, MAX_VPORTS,
2070 "esw_ingress_prio", 0);
2072 return PTR_ERR(prio);
/*
 * Create the sniffer-RX root namespace with a single one-table prio.
 */
2077 static int init_sniffer_rx_root_ns(struct mlx5_core_dev *dev)
2079 struct fs_prio *prio;
2081 dev->sniffer_rx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_RX,
2082 MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME);
2083 if (!dev->sniffer_rx_root_ns)
2087 prio = fs_create_prio(&dev->sniffer_rx_root_ns->ns, 0, 1,
2090 return PTR_ERR(prio);
/*
 * Create the sniffer-TX root namespace with a single one-table prio.
 */
2096 static int init_sniffer_tx_root_ns(struct mlx5_core_dev *dev)
2098 struct fs_prio *prio;
2100 dev->sniffer_tx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_TX,
2101 MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME);
2102 if (!dev->sniffer_tx_root_ns)
2106 prio = fs_create_prio(&dev->sniffer_tx_root_ns->ns, 0, 1,
2109 return PTR_ERR(prio);
/*
 * Allocate a namespace under priority @prio, register it in the tree under
 * @name, and link it onto prio->objs.  Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
2114 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2117 struct mlx5_flow_namespace *ns;
2119 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2121 return ERR_PTR(-ENOMEM);
2123 fs_init_namespace(ns);
2124 fs_add_node(&ns->base, &prio->base, name, 1);
2125 list_add_tail(&ns->base.list, &prio->objs);
/*
 * Extract a single flow-table capability bit at @offset from the device's
 * current MLX5_CAP_FLOW_TABLE capability block (big-endian dword array).
 */
2130 #define FLOW_TABLE_BIT_SZ 1
2131 #define GET_FLOW_TABLE_CAP(dev, offset) \
2132 ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \
2134 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
/*
 * True iff every capability bit listed in @caps is set on @dev
 * (checked via GET_FLOW_TABLE_CAP).
 */
2136 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2140 for (i = 0; i < caps->arr_sz; i++) {
2141 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
/*
 * Recursively materialize one node of the static init_tree description:
 * a FS_TYPE_PRIO node becomes an fs_prio (skipped when the device's
 * max_ft_level or capabilities don't satisfy the node), a FS_TYPE_NAMESPACE
 * node becomes an fs namespace; then recurse into node->children with the
 * freshly created object as parent.  Priority is the node's index within
 * its parent's children array.
 */
2147 static int _init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
2148 struct init_tree_node *node, struct fs_base *base_parent,
2149 struct init_tree_node *tree_parent)
2151 struct mlx5_flow_namespace *fs_ns;
2152 struct fs_prio *fs_prio;
2154 struct fs_base *base;
2158 if (node->type == FS_TYPE_PRIO) {
2159 if ((node->min_ft_level > max_ft_level) ||
2160 !has_required_caps(dev, &node->caps))
2163 fs_get_obj(fs_ns, base_parent);
2164 priority = node - tree_parent->children;
2165 fs_prio = fs_create_prio(fs_ns, priority,
2167 node->name, node->flags);
2168 if (IS_ERR(fs_prio)) {
2169 err = PTR_ERR(fs_prio);
2172 base = &fs_prio->base;
2173 } else if (node->type == FS_TYPE_NAMESPACE) {
2174 fs_get_obj(fs_prio, base_parent);
2175 fs_ns = fs_create_namespace(fs_prio, node->name);
2176 if (IS_ERR(fs_ns)) {
2177 err = PTR_ERR(fs_ns);
2180 base = &fs_ns->base;
2184 for (i = 0; i < node->ar_size; i++) {
2185 err = _init_root_tree(dev, max_ft_level, &node->children[i], base,
/*
 * Entry point of the static-tree build: iterate the top-level children of
 * @node and recurse via _init_root_tree with @parent (the root namespace)
 * as the tree anchor.
 */
2194 static int init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
2195 struct init_tree_node *node, struct fs_base *parent)
2198 struct mlx5_flow_namespace *fs_ns;
2201 fs_get_obj(fs_ns, parent);
2202 for (i = 0; i < node->ar_size; i++) {
2203 err = _init_root_tree(dev, max_ft_level,
2204 &node->children[i], &fs_ns->base, node);
2211 static int sum_max_ft_in_prio(struct fs_prio *prio);
/*
 * Sum the max_ft budget over every prio in @ns (mutually recursive with
 * sum_max_ft_in_prio).
 */
2212 static int sum_max_ft_in_ns(struct mlx5_flow_namespace *ns)
2214 struct fs_prio *prio;
2217 fs_for_each_prio(prio, ns) {
2218 sum += sum_max_ft_in_prio(prio);
/*
 * Effective max_ft of a prio: its own max_ft for a leaf prio, otherwise
 * the sum over nested namespaces (objects that are not flow tables).
 */
2223 static int sum_max_ft_in_prio(struct fs_prio *prio)
2227 struct mlx5_flow_namespace *ns;
2230 return prio->max_ft;
2232 fs_for_each_ns_or_ft(it, prio) {
2233 if (it->type == FS_TYPE_FLOW_TABLE)
2237 sum += sum_max_ft_in_ns(ns);
/*
 * Walk all prios of @ns and (re)compute their effective max_ft budgets via
 * sum_max_ft_in_prio.
 */
2243 static void set_max_ft(struct mlx5_flow_namespace *ns)
2245 struct fs_prio *prio;
2250 fs_for_each_prio(prio, ns)
2251 sum_max_ft_in_prio(prio);
/*
 * Create the NIC-RX root namespace and populate it from the static root_fs
 * description (bounded by the device's max supported flow-table level),
 * then fix up the per-prio max_ft budgets.
 */
2254 static int init_root_ns(struct mlx5_core_dev *dev)
2256 int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
2257 flow_table_properties_nic_receive.
2260 dev->root_ns = create_root_ns(dev, FS_FT_NIC_RX,
2261 MLX5_CORE_FS_ROOT_NS_NAME);
2262 if (IS_ERR_OR_NULL(dev->root_ns))
2266 if (init_root_tree(dev, max_ft_level, &root_fs, &dev->root_ns->ns.base))
2269 set_max_ft(&dev->root_ns->ns);
/*
 * Return the match_criteria_enable mask of the group a rule belongs to
 * (rule -> fte -> group, via parent pointers).
 */
2276 u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule)
2278 struct fs_base *pbase;
2279 struct mlx5_flow_group *fg;
2281 pbase = rule->base.parent;
2283 pbase = pbase->parent;
2286 fs_get_obj(fg, pbase);
2287 return fg->mask.match_criteria_enable;
/*
 * Copy the match value of the fte that owns @rule into @match_value
 * (caller-provided buffer of fte_match_param size).
 */
2290 void mlx5_get_match_value(u32 *match_value,
2291 struct mlx5_flow_rule *rule)
2293 struct fs_base *pbase;
2296 pbase = rule->base.parent;
2298 fs_get_obj(fte, pbase);
2300 memcpy(match_value, fte->val, sizeof(fte->val));
/*
 * Copy the match criteria of the group that owns @rule into
 * @match_criteria (rule -> fte -> group, via parent pointers).
 */
2303 void mlx5_get_match_criteria(u32 *match_criteria,
2304 struct mlx5_flow_rule *rule)
2306 struct fs_base *pbase;
2307 struct mlx5_flow_group *fg;
2309 pbase = rule->base.parent;
2311 pbase = pbase->parent;
2314 fs_get_obj(fg, pbase);
2315 memcpy(match_criteria, &fg->mask.match_criteria,
2316 sizeof(fg->mask.match_criteria));
/*
 * Initialize all flow-steering root namespaces for @dev (NIC RX when the
 * nic_flow_table cap is set, then FDB, ACLs and sniffers).  On any failure
 * everything created so far is released via mlx5_cleanup_fs.
 */
2319 int mlx5_init_fs(struct mlx5_core_dev *dev)
2323 if (MLX5_CAP_GEN(dev, nic_flow_table)) {
2324 err = init_root_ns(dev);
2329 err = init_fdb_root_ns(dev);
2333 err = init_egress_acl_root_ns(dev);
2337 err = init_ingress_acl_root_ns(dev);
2341 err = init_sniffer_tx_root_ns(dev);
2345 err = init_sniffer_rx_root_ns(dev);
2351 mlx5_cleanup_fs(dev);
/*
 * Map a public namespace type to the matching namespace object: the
 * dedicated roots (FDB, ACLs, sniffers) are returned directly when they
 * exist; BYPASS/KERNEL/LEFTOVERS resolve through a prio lookup in the NIC
 * RX root namespace (find_prio) and return the prio's first namespace.
 * NOTE(review): fs_prio is declared `static` (function-local static) — no
 * visible reason for the static storage; verify against full source whether
 * this is intentional.
 */
2355 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2356 enum mlx5_flow_namespace_type type)
2358 struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
2360 static struct fs_prio *fs_prio;
2361 struct mlx5_flow_namespace *ns;
2364 case MLX5_FLOW_NAMESPACE_BYPASS:
2367 case MLX5_FLOW_NAMESPACE_KERNEL:
2370 case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2373 case MLX5_FLOW_NAMESPACE_FDB:
2374 if (dev->fdb_root_ns)
2375 return &dev->fdb_root_ns->ns;
2378 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2379 if (dev->esw_egress_root_ns)
2380 return &dev->esw_egress_root_ns->ns;
2383 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2384 if (dev->esw_ingress_root_ns)
2385 return &dev->esw_ingress_root_ns->ns;
2388 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2389 if (dev->sniffer_rx_root_ns)
2390 return &dev->sniffer_rx_root_ns->ns;
2393 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2394 if (dev->sniffer_tx_root_ns)
2395 return &dev->sniffer_tx_root_ns->ns;
2405 fs_prio = find_prio(&root_ns->ns, prio);
2409 ns = list_first_entry(&fs_prio->objs,
2415 EXPORT_SYMBOL(mlx5_get_flow_namespace);
/*
 * Attach (or update) per-handler private data on @rule.  Under
 * rule->clients_lock: if the handler already has an entry its data pointer
 * is replaced; otherwise a new fs_client_priv_data is allocated and linked
 * onto rule->clients_data.  Returns -ENOMEM on allocation failure.
 */
2418 int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule,
2419 struct mlx5_flow_handler *fs_handler,
2422 struct fs_client_priv_data *priv_data;
2424 mutex_lock(&rule->clients_lock);
2425 /*Check that hanlder isn't exists in the list already*/
2426 list_for_each_entry(priv_data, &rule->clients_data, list) {
2427 if (priv_data->fs_handler == fs_handler) {
2428 priv_data->client_dst_data = client_data;
2432 priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
2434 mutex_unlock(&rule->clients_lock);
2438 priv_data->client_dst_data = client_data;
2439 priv_data->fs_handler = fs_handler;
2440 list_add(&priv_data->list, &rule->clients_data);
2443 mutex_unlock(&rule->clients_lock);
/*
 * rule_event_fn callback used during notifier unregistration: remove the
 * client entry belonging to the handler passed through @context from the
 * rule's clients_data list, under clients_lock.
 */
2448 static int remove_from_clients(struct mlx5_flow_rule *rule,
2453 struct fs_client_priv_data *iter_client;
2454 struct fs_client_priv_data *temp_client;
2455 struct mlx5_flow_handler *handler = (struct
2456 mlx5_flow_handler*)context;
2458 mutex_lock(&rule->clients_lock);
2459 list_for_each_entry_safe(iter_client, temp_client,
2460 &rule->clients_data, list) {
2461 if (iter_client->fs_handler == handler) {
2462 list_del(&iter_client->list);
2467 mutex_unlock(&rule->clients_lock);
/*
 * Register add/del rule callbacks on the namespace identified by @ns_type.
 * Allocates a handler object and appends it to the namespace notifier list
 * under notifiers_rw_sem (write).  Returns ERR_PTR(-EINVAL) for an unknown
 * namespace and ERR_PTR(-ENOMEM) on allocation failure.
 */
2472 struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
2473 enum mlx5_flow_namespace_type ns_type,
2474 rule_event_fn add_cb,
2475 rule_event_fn del_cb,
2478 struct mlx5_flow_namespace *ns;
2479 struct mlx5_flow_handler *handler;
2481 ns = mlx5_get_flow_namespace(dev, ns_type);
2483 return ERR_PTR(-EINVAL);
2485 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
2487 return ERR_PTR(-ENOMEM);
2489 handler->add_dst_cb = add_cb;
2490 handler->del_dst_cb = del_cb;
2491 handler->client_context = context;
2493 down_write(&ns->notifiers_rw_sem);
2494 list_add_tail(&handler->list, &ns->list_notifiers);
2495 up_write(&ns->notifiers_rw_sem);
/* Forward declaration: defined below, needed by the unregister path. */
2500 static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
2501 rule_event_fn add_rule_cb,
/*
 * Unregister a rule notifier: with both namespace semaphores held for
 * write, purge the handler's private data from every existing rule
 * (remove_from_clients) and drop the handler from the notifier list.
 */
2504 void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler)
2506 struct mlx5_flow_namespace *ns = handler->ns;
2508 /*Remove from dst's clients*/
2509 down_write(&ns->dests_rw_sem);
2510 down_write(&ns->notifiers_rw_sem);
2511 iterate_rules_in_ns(ns, remove_from_clients, handler);
2512 list_del(&handler->list);
2513 up_write(&ns->notifiers_rw_sem);
2514 up_write(&ns->dests_rw_sem);
/*
 * Invoke @add_rule_cb for every rule in @ft: walk groups -> ftes -> dests
 * with the matching base locks held at each level, taking a reference on
 * each rule around the callback (fs_get / fs_put_parent_locked).
 */
2518 static void iterate_rules_in_ft(struct mlx5_flow_table *ft,
2519 rule_event_fn add_rule_cb,
2522 struct mlx5_flow_group *iter_fg;
2523 struct fs_fte *iter_fte;
2524 struct mlx5_flow_rule *iter_rule;
2528 mutex_lock(&ft->base.lock);
2529 fs_for_each_fg(iter_fg, ft) {
2530 mutex_lock(&iter_fg->base.lock);
2531 fs_for_each_fte(iter_fte, iter_fg) {
2532 mutex_lock(&iter_fte->base.lock);
2534 fs_for_each_dst(iter_rule, iter_fte) {
2535 fs_get(&iter_rule->base);
2536 err = add_rule_cb(iter_rule,
2540 fs_put_parent_locked(&iter_rule->base);
2543 is_new_rule = false;
2545 mutex_unlock(&iter_fte->base.lock);
2549 mutex_unlock(&iter_fg->base.lock);
2553 mutex_unlock(&ft->base.lock);
/*
 * Invoke @add_rule_cb for every rule under @prio: dispatch each child to
 * iterate_rules_in_ft (flow table) or recurse through iterate_rules_in_ns
 * (namespace), under prio->base.lock.
 */
2556 static void iterate_rules_in_prio(struct fs_prio *prio,
2557 rule_event_fn add_rule_cb,
2562 mutex_lock(&prio->base.lock);
2563 fs_for_each_ns_or_ft(it, prio) {
2564 if (it->type == FS_TYPE_FLOW_TABLE) {
2565 struct mlx5_flow_table *ft;
2568 iterate_rules_in_ft(ft, add_rule_cb, context);
2570 struct mlx5_flow_namespace *ns;
2573 iterate_rules_in_ns(ns, add_rule_cb, context);
2576 mutex_unlock(&prio->base.lock);
/*
 * Invoke @add_rule_cb for every rule under @ns by recursing through each
 * prio (mutually recursive with iterate_rules_in_prio), under
 * ns->base.lock.
 */
2579 static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
2580 rule_event_fn add_rule_cb,
2583 struct fs_prio *iter_prio;
2585 mutex_lock(&ns->base.lock);
2586 fs_for_each_prio(iter_prio, ns) {
2587 iterate_rules_in_prio(iter_prio, add_rule_cb, context);
2589 mutex_unlock(&ns->base.lock);
/*
 * Public entry point: run @add_rule_cb over all existing rules in @ns.
 * Holds dests_rw_sem for write (excludes rule add/remove) and
 * notifiers_rw_sem for read during the walk.
 */
2592 void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
2593 rule_event_fn add_rule_cb,
2596 down_write(&ns->dests_rw_sem);
2597 down_read(&ns->notifiers_rw_sem);
2598 iterate_rules_in_ns(ns, add_rule_cb, context);
2599 up_read(&ns->notifiers_rw_sem);
2600 up_write(&ns->dests_rw_sem);
/*
 * Free a rules list built by get_roce_flow_rules: unlink every node
 * (safe iteration) and, per the visible structure, release each node and
 * the list head.
 */
2604 void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list)
2606 struct mlx5_flow_rule_node *iter_node;
2607 struct mlx5_flow_rule_node *temp_node;
2609 list_for_each_entry_safe(iter_node, temp_node, &rules_list->head, list) {
2610 list_del(&iter_node->list);
2617 #define ROCEV1_ETHERTYPE 0x8915
/*
 * Append a RoCEv1 steering rule (outer-headers match on the RoCE
 * ethertype 0x8915) to @rules_list.
 */
2618 static int set_rocev1_rules(struct list_head *rules_list)
2620 struct mlx5_flow_rule_node *rocev1_rule;
2622 rocev1_rule = kzalloc(sizeof(*rocev1_rule), GFP_KERNEL);
2626 rocev1_rule->match_criteria_enable =
2627 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2628 MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_criteria, ethertype,
2630 MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_value, ethertype,
2633 list_add_tail(&rocev1_rule->list, rules_list);
2638 #define ROCEV2_UDP_PORT 4791
/*
 * Append the two RoCEv2 steering rules to @rules_list: IPv4 and IPv6
 * variants, each matching outer headers on ethertype + UDP protocol +
 * destination port 4791.
 */
2639 static int set_rocev2_rules(struct list_head *rules_list)
2641 struct mlx5_flow_rule_node *ipv4_rule;
2642 struct mlx5_flow_rule_node *ipv6_rule;
2644 ipv4_rule = kzalloc(sizeof(*ipv4_rule), GFP_KERNEL);
2648 ipv6_rule = kzalloc(sizeof(*ipv6_rule), GFP_KERNEL);
/* IPv4 variant. */
2654 ipv4_rule->match_criteria_enable =
2655 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2656 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ethertype,
2658 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ethertype,
2660 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ip_protocol,
2662 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ip_protocol,
2664 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, udp_dport,
2666 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, udp_dport,
/* IPv6 variant. */
2669 ipv6_rule->match_criteria_enable =
2670 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2671 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ethertype,
2673 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ethertype,
2675 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ip_protocol,
2677 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ip_protocol,
2679 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, udp_dport,
2681 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, udp_dport,
2684 list_add_tail(&ipv4_rule->list, rules_list);
2685 list_add_tail(&ipv6_rule->list, rules_list);
2691 struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode)
2694 struct mlx5_flow_rules_list *rules_list =
2695 kzalloc(sizeof(*rules_list), GFP_KERNEL);
2700 INIT_LIST_HEAD(&rules_list->head);
2702 if (roce_mode & MLX5_ROCE_VERSION_1_CAP) {
2703 err = set_rocev1_rules(&rules_list->head);
2707 if (roce_mode & MLX5_ROCE_VERSION_2_CAP)
2708 err = set_rocev2_rules(&rules_list->head);
2715 mlx5_del_flow_rules_list(rules_list);