2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <linux/module.h>
29 #include <dev/mlx5/driver.h>
30 #include "mlx5_core.h"
32 #include <linux/string.h>
33 #include <linux/compiler.h>
/*
 * Helper macros for building the static default flow-steering tree below.
 * ADD_PRIO / ADD_FT_PRIO / ADD_NS expand to struct init_tree_node
 * initializers; the *_ARRAY_SIZE macros count the variadic elements via
 * a compound-literal sizeof.
 * NOTE(review): continuation lines of some macros appear elided in this
 * view of the file.
 */
35 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
36 sizeof(struct init_tree_node))
/* Priority node: min_ft_level/max_ft limits plus nested children. */
38 #define ADD_PRIO(name_val, flags_val, min_level_val, max_ft_val, caps_val, \
39 ...) {.type = FS_TYPE_PRIO,\
41 .min_ft_level = min_level_val,\
43 .max_ft = max_ft_val,\
45 .children = (struct init_tree_node[]) {__VA_ARGS__},\
46 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
49 #define ADD_FT_PRIO(name_val, flags_val, max_ft_val, ...)\
50 ADD_PRIO(name_val, flags_val, 0, max_ft_val, {},\
53 #define ADD_NS(name_val, ...) {.type = FS_TYPE_NAMESPACE,\
55 .children = (struct init_tree_node[]) {__VA_ARGS__},\
56 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
59 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
62 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
/* HW capability bits a priority requires before it is instantiated. */
64 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
65 .caps = (long[]) {__VA_ARGS__}}
/* Per-priority flow-table count limits used by the tree below. */
67 #define BYPASS_MAX_FT 5
68 #define BYPASS_PRIO_MAX_FT 1
69 #define KERNEL_MAX_FT 3
70 #define LEFTOVER_MAX_FT 1
/*
 * Minimum table levels for the kernel, leftovers and bypass priorities.
 * ("KENREL" is a historical typo for "KERNEL"; the name is kept so other
 * users of the macro elsewhere in the file keep compiling.)
 * The derived values are parenthesized so the macros expand correctly
 * inside larger expressions (e.g. `2 * LEFTOVER_MIN_LEVEL`).
 */
#define KENREL_MIN_LEVEL 3
#define LEFTOVER_MIN_LEVEL (KENREL_MIN_LEVEL + 1)
#define BYPASS_MIN_LEVEL (MLX5_NUM_BYPASS_FTS + LEFTOVER_MIN_LEVEL)
/*
 * One node of the default tree description: a namespace or priority,
 * its children array and the HW caps it requires.
 * NOTE(review): several struct fields appear elided in this view.
 */
79 struct init_tree_node {
82 struct init_tree_node *children;
84 struct node_caps caps;
/*
 * Default NIC-receive tree: the bypass priorities (prio0..prio7 plus a
 * multicast prio) gated on flow_modify_en/modify_root caps, then the
 * kernel priority, then a shared "leftovers" priority.
 */
90 .type = FS_TYPE_NAMESPACE,
93 .children = (struct init_tree_node[]) {
94 ADD_PRIO("by_pass_prio", 0, BYPASS_MIN_LEVEL, 0,
95 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
96 FS_CAP(flow_table_properties_nic_receive.modify_root)),
98 ADD_FT_PRIO("prio0", 0,
100 ADD_FT_PRIO("prio1", 0,
102 ADD_FT_PRIO("prio2", 0,
104 ADD_FT_PRIO("prio3", 0,
106 ADD_FT_PRIO("prio4", 0,
108 ADD_FT_PRIO("prio5", 0,
110 ADD_FT_PRIO("prio6", 0,
112 ADD_FT_PRIO("prio7", 0,
114 ADD_FT_PRIO("prio-mcast", 0,
115 BYPASS_PRIO_MAX_FT))),
116 ADD_PRIO("kernel_prio", 0, KENREL_MIN_LEVEL, 0, {},
118 ADD_FT_PRIO("prio_kernel-0", 0,
120 ADD_PRIO("leftovers_prio", MLX5_CORE_FS_PRIO_SHARED,
121 LEFTOVER_MIN_LEVEL, 0,
122 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
123 FS_CAP(flow_table_properties_nic_receive.modify_root)),
124 ADD_NS("leftover_ns",
125 ADD_FT_PRIO("leftovers_prio-0",
126 MLX5_CORE_FS_PRIO_SHARED,
131 /* Tree creation functions */
/*
 * Walk up from @node to the topmost tree node and return its enclosing
 * mlx5_flow_root_namespace.  Warns when the topmost node is not a
 * namespace (node detached or corrupted).
 */
133 static struct mlx5_flow_root_namespace *find_root(struct fs_base *node)
135 struct fs_base *parent;
137 /* Make sure we only read it once while we go up the tree */
138 while ((parent = node->parent))
141 if (node->type != FS_TYPE_NAMESPACE) {
142 printf("mlx5_core: WARN: ""mlx5: flow steering node %s is not in tree or garbaged\n", node->name);
/* Root node is embedded: fs_base -> namespace -> root namespace. */
146 return container_of(container_of(node,
147 struct mlx5_flow_namespace,
149 struct mlx5_flow_root_namespace,
/* Resolve the mlx5_core_dev owning @node via its root namespace. */
153 static inline struct mlx5_core_dev *fs_get_dev(struct fs_base *node)
155 struct mlx5_flow_root_namespace *root = find_root(node);
/*
 * Initialize the bookkeeping of a tree node: kref + user refcount,
 * removal-completion, sibling list head and per-node lock.
 */
162 static void fs_init_node(struct fs_base *node,
163 unsigned int refcount)
165 kref_init(&node->refcount);
166 atomic_set(&node->users_refcount, refcount);
167 init_completion(&node->complete);
168 INIT_LIST_HEAD(&node->list);
169 mutex_init(&node->lock);
/*
 * Attach an already-initialized node under @parent: the child holds a
 * user reference on the parent and copies @name.
 */
172 static void _fs_add_node(struct fs_base *node,
174 struct fs_base *parent)
177 atomic_inc(&parent->users_refcount);
178 node->name = kstrdup_const(name, GFP_KERNEL);
179 node->parent = parent;
/* Convenience wrapper: initialize @node then attach it under @parent. */
182 static void fs_add_node(struct fs_base *node,
183 struct fs_base *parent, const char *name,
184 unsigned int refcount)
186 fs_init_node(node, refcount);
187 _fs_add_node(node, name, parent);
/* Forward declarations for the teardown helpers used below. */
190 static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
193 static void fs_del_dst(struct mlx5_flow_rule *dst);
194 static void _fs_del_ft(struct mlx5_flow_table *ft);
195 static void fs_del_fg(struct mlx5_flow_group *fg);
196 static void fs_del_fte(struct fs_fte *fte);
/*
 * Dispatch the type-specific firmware/object teardown for a node that
 * is being removed from the tree.
 * NOTE(review): per-case break statements appear elided in this view.
 */
198 static void cmd_remove_node(struct fs_base *base)
200 switch (base->type) {
201 case FS_TYPE_FLOW_DEST:
202 fs_del_dst(container_of(base, struct mlx5_flow_rule, base));
204 case FS_TYPE_FLOW_TABLE:
205 _fs_del_ft(container_of(base, struct mlx5_flow_table, base));
207 case FS_TYPE_FLOW_GROUP:
208 fs_del_fg(container_of(base, struct mlx5_flow_group, base));
210 case FS_TYPE_FLOW_ENTRY:
211 fs_del_fte(container_of(base, struct fs_fte, base));
/*
 * kref release callback: tear down the node's firmware state under the
 * parent's and its own lock, signal waiters, then drop the user
 * reference this node held on its parent.
 */
218 static void __fs_remove_node(struct kref *kref)
220 struct fs_base *node = container_of(kref, struct fs_base, refcount);
223 mutex_lock(&node->parent->lock);
224 mutex_lock(&node->lock);
225 cmd_remove_node(node);
226 mutex_unlock(&node->lock);
227 complete(&node->complete);
229 mutex_unlock(&node->parent->lock);
230 _fs_put(node->parent, _fs_remove_node, false);
/*
 * Release variant that also frees the node's name after the common
 * removal work.
 */
234 void _fs_remove_node(struct kref *kref)
236 struct fs_base *node = container_of(kref, struct fs_base, refcount);
238 __fs_remove_node(kref);
239 kfree_const(node->name);
/* Take a user reference on @node. */
243 static void fs_get(struct fs_base *node)
245 atomic_inc(&node->users_refcount);
/*
 * Drop a user reference.  On the last user ref: unlink the node from its
 * parent's list (holding the parent lock unless @parent_locked says the
 * caller already holds it) and drop the kref via @kref_cb.
 */
248 static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
251 struct fs_base *parent_node = node->parent;
253 if (parent_node && !parent_locked)
254 mutex_lock(&parent_node->lock);
255 if (atomic_dec_and_test(&node->users_refcount)) {
257 /*remove from parent's list*/
258 list_del_init(&node->list);
259 mutex_unlock(&parent_node->lock);
/* kref_cb may sleep; the parent lock is dropped around it. */
261 kref_put(&node->refcount, kref_cb);
262 if (parent_node && parent_locked)
263 mutex_lock(&parent_node->lock);
264 } else if (parent_node && !parent_locked) {
265 mutex_unlock(&parent_node->lock);
/* Drop a user reference; parent lock not held by the caller. */
269 static void fs_put(struct fs_base *node)
271 _fs_put(node, __fs_remove_node, false);
/* Drop a user reference; caller already holds the parent's lock. */
274 static void fs_put_parent_locked(struct fs_base *node)
276 _fs_put(node, __fs_remove_node, true);
/*
 * Synchronously remove @node: wait until the removal completion fires,
 * then free its name.
 */
279 static void fs_remove_node(struct fs_base *node)
282 wait_for_completion(&node->complete);
283 kfree_const(node->name);
/* Like fs_remove_node() but for callers holding the parent's lock. */
287 static void fs_remove_node_parent_locked(struct fs_base *node)
289 fs_put_parent_locked(node);
290 wait_for_completion(&node->complete);
291 kfree_const(node->name);
/*
 * Allocate and populate a flow-table entry object (no firmware command
 * issued here).  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
295 static struct fs_fte *fs_alloc_fte(u8 action,
303 fte = kzalloc(sizeof(*fte), GFP_KERNEL);
305 return ERR_PTR(-ENOMEM);
307 memcpy(fte->val, match_value, sizeof(fte->val));
308 fte->base.type = FS_TYPE_FLOW_ENTRY;
310 fte->flow_tag = flow_tag;
312 INIT_LIST_HEAD(&fte->dests);
313 fte->action = action;
/*
 * Allocate the star-rule FTE of @ft inside group @fg together with its
 * single forward-to-flow-table destination, and link both into the
 * in-memory tree.  Fails with -ENOSPC when the group is full.
 */
318 static struct fs_fte *alloc_star_ft_entry(struct mlx5_flow_table *ft,
319 struct mlx5_flow_group *fg,
325 struct mlx5_flow_rule *dst;
327 if (fg->num_ftes == fg->max_ftes)
328 return ERR_PTR(-ENOSPC);
330 fte = fs_alloc_fte(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
331 MLX5_FS_DEFAULT_FLOW_TAG, match_value, index);
336 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
342 fte->base.parent = &fg->base;
344 dst->dest_attr.type = MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE;
345 dst->base.parent = &fte->base;
346 list_add(&dst->base.list, &fte->dests);
347 /* assumed that the callee creates the star rules sorted by index */
348 list_add_tail(&fte->base.list, &fg->ftes);
358 /* assume that fte can't be changed */
/*
 * Unlink and free a star-rule FTE and all of its destination objects.
 */
359 static void free_star_fte_entry(struct fs_fte *fte)
361 struct mlx5_flow_group *fg;
362 struct mlx5_flow_rule *dst, *temp;
364 fs_get_parent(fg, fte);
366 list_for_each_entry_safe(dst, temp, &fte->dests, base.list) {
368 list_del(&dst->base.list);
372 list_del(&fte->base.list);
/*
 * Allocate a flow-group object from a create_flow_group_in mailbox:
 * copies the match criteria/enable mask and derives start_index and
 * max_ftes from the start/end flow indices.
 */
377 static struct mlx5_flow_group *fs_alloc_fg(u32 *create_fg_in)
379 struct mlx5_flow_group *fg;
380 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
381 create_fg_in, match_criteria);
382 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
384 match_criteria_enable);
385 fg = kzalloc(sizeof(*fg), GFP_KERNEL);
387 return ERR_PTR(-ENOMEM);
389 INIT_LIST_HEAD(&fg->ftes);
390 fg->mask.match_criteria_enable = match_criteria_enable;
391 memcpy(&fg->mask.match_criteria, match_criteria,
392 sizeof(fg->mask.match_criteria));
393 fg->base.type = FS_TYPE_FLOW_GROUP;
394 fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
/* end - start is inclusive, hence the +1. */
396 fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
397 end_flow_index) - fg->start_index + 1;
401 static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio);
402 static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
403 struct fs_prio *prio);
405 /* assumed src_ft and dst_ft can't be freed */
/*
 * Repoint src_ft's star rule (its catch-all forward entry) at @dst_ft
 * and push the change to firmware via set_fte; a NULL @dst_ft instead
 * deletes the firmware entry.  Takes a reference on @dst_ft on success.
 */
406 static int fs_set_star_rule(struct mlx5_core_dev *dev,
407 struct mlx5_flow_table *src_ft,
408 struct mlx5_flow_table *dst_ft)
410 struct mlx5_flow_rule *src_dst;
412 struct fs_fte *src_fte;
414 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
416 src_dst = list_first_entry(&src_ft->star_rule.fte->dests,
417 struct mlx5_flow_rule, base.list);
418 match_value = mlx5_vzalloc(match_len);
420 mlx5_core_warn(dev, "failed to allocate inbox\n");
423 /*Create match context*/
425 fs_get_parent(src_fte, src_dst);
427 src_dst->dest_attr.ft = dst_ft;
429 err = mlx5_cmd_fs_set_fte(dev,
432 match_value, src_ft->type,
433 src_ft->id, src_fte->index,
434 src_ft->star_rule.fg->id,
442 fs_get(&dst_ft->base);
/* dst_ft == NULL: remove the star entry from firmware instead. */
444 mlx5_cmd_fs_delete_fte(dev,
447 src_ft->type, src_ft->id,
/*
 * Point the star rule of every flow table in @prev_prio at @next_ft,
 * dropping the reference each star rule held on its previous target.
 * @locked_prio marks a priority whose lock the caller already holds so
 * the put uses the parent-locked variant for tables in it.
 */
456 static int connect_prev_fts(struct fs_prio *locked_prio,
457 struct fs_prio *prev_prio,
458 struct mlx5_flow_table *next_ft)
460 struct mlx5_flow_table *iter;
462 struct mlx5_core_dev *dev = fs_get_dev(&prev_prio->base);
467 mutex_lock(&prev_prio->base.lock);
468 fs_for_each_ft(iter, prev_prio) {
469 struct mlx5_flow_rule *src_dst =
470 list_first_entry(&iter->star_rule.fte->dests,
471 struct mlx5_flow_rule, base.list);
472 struct mlx5_flow_table *prev_ft = src_dst->dest_attr.ft;
/* Already chained to next_ft - nothing to rewire. */
474 if (prev_ft == next_ft)
477 err = fs_set_star_rule(dev, iter, next_ft);
480 "mlx5: flow steering can't connect prev and next\n");
483 /* Assume ft's prio is locked */
485 struct fs_prio *prio;
487 fs_get_parent(prio, prev_ft);
488 if (prio == locked_prio)
489 fs_put_parent_locked(&prev_ft->base);
491 fs_put(&prev_ft->base);
497 mutex_unlock(&prev_prio->base.lock);
/*
 * Create @ft's star rule: a dedicated one-entry flow group at index
 * max_fte plus a catch-all FTE forwarding to the next table in the
 * chain, then splice @ft into the table chain (prev tables -> ft ->
 * next table) under the root's fs_chain_lock.
 */
501 static int create_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
503 struct mlx5_flow_group *fg;
507 struct mlx5_flow_table *next_ft;
508 struct mlx5_flow_table *prev_ft;
509 struct mlx5_flow_root_namespace *root = find_root(&prio->base);
510 int fg_inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
511 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
513 fg_in = mlx5_vzalloc(fg_inlen);
515 mlx5_core_warn(root->dev, "failed to allocate inbox\n");
519 match_value = mlx5_vzalloc(match_len);
521 mlx5_core_warn(root->dev, "failed to allocate inbox\n");
/* The star group occupies exactly the one reserved slot at max_fte. */
526 MLX5_SET(create_flow_group_in, fg_in, start_flow_index, ft->max_fte);
527 MLX5_SET(create_flow_group_in, fg_in, end_flow_index, ft->max_fte);
528 fg = fs_alloc_fg(fg_in);
533 ft->star_rule.fg = fg;
534 err = mlx5_cmd_fs_create_fg(fs_get_dev(&prio->base),
535 fg_in, ft->vport, ft->type,
541 ft->star_rule.fte = alloc_star_ft_entry(ft, fg,
544 if (IS_ERR(ft->star_rule.fte))
547 mutex_lock(&root->fs_chain_lock);
548 next_ft = find_next_ft(prio);
549 err = fs_set_star_rule(root->dev, ft, next_ft);
551 mutex_unlock(&root->fs_chain_lock);
555 struct fs_prio *parent;
557 fs_get_parent(parent, next_ft);
558 fs_put(&next_ft->base);
560 prev_ft = find_prev_ft(ft, prio);
562 struct fs_prio *prev_parent;
564 fs_get_parent(prev_parent, prev_ft);
566 err = connect_prev_fts(NULL, prev_parent, ft);
568 mutex_unlock(&root->fs_chain_lock);
569 goto destroy_chained_star_rule;
571 fs_put(&prev_ft->base);
573 mutex_unlock(&root->fs_chain_lock);
/* Error unwinding: detach from chain, free FTE, destroy the group. */
579 destroy_chained_star_rule:
580 fs_set_star_rule(fs_get_dev(&prio->base), ft, NULL);
582 fs_put(&next_ft->base);
584 free_star_fte_entry(ft->star_rule.fte);
585 mlx5_cmd_fs_destroy_fg(fs_get_dev(&ft->base), ft->vport,
/*
 * Undo create_star_rule(): atomically rewire the previous tables to the
 * next table (under fs_chain_lock), delete @ft's star FTE from
 * firmware, destroy its star group and free the in-memory objects.
 */
596 static void destroy_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
599 struct mlx5_flow_root_namespace *root;
600 struct mlx5_core_dev *dev = fs_get_dev(&prio->base);
601 struct mlx5_flow_table *prev_ft, *next_ft;
602 struct fs_prio *prev_prio;
606 root = find_root(&prio->base);
608 printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of priority %s", prio->base.name);
610 /* In order to ensure atomic deletion, first update
611 * prev ft to point on the next ft.
613 mutex_lock(&root->fs_chain_lock);
614 prev_ft = find_prev_ft(ft, prio);
615 next_ft = find_next_ft(prio);
617 fs_get_parent(prev_prio, prev_ft);
618 /*Prev is connected to ft, only if ft is the first(last) in the prio*/
619 err = connect_prev_fts(prio, prev_prio, next_ft);
621 mlx5_core_warn(root->dev,
622 "flow steering can't connect prev and next of flow table\n");
623 fs_put(&prev_ft->base);
626 err = fs_set_star_rule(root->dev, ft, NULL);
627 /*One put is for fs_get in find next ft*/
629 fs_put(&next_ft->base);
631 fs_put(&next_ft->base);
634 mutex_unlock(&root->fs_chain_lock);
635 err = mlx5_cmd_fs_destroy_fg(dev, ft->vport, ft->type, ft->id,
636 ft->star_rule.fg->id);
639 "flow steering can't destroy star entry group(index:%d) of ft:%s\n", ft->star_rule.fg->start_index,
641 free_star_fte_entry(ft->star_rule.fte);
643 kfree(ft->star_rule.fg);
644 ft->star_rule.fg = NULL;
/* Linear search for the fs_prio with priority number @prio in @ns. */
647 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
650 struct fs_prio *iter_prio;
652 fs_for_each_prio(iter_prio, ns) {
653 if (iter_prio->prio == prio)
660 static unsigned int _alloc_new_level(struct fs_prio *prio,
661 struct mlx5_flow_namespace *match);
/*
 * Accumulate the level contribution of the priorities in @ns (up to and
 * not beyond @prio), then recurse upward through @ns's parent priority.
 * Mutually recursive with _alloc_new_level().
 */
663 static unsigned int __alloc_new_level(struct mlx5_flow_namespace *ns,
664 struct fs_prio *prio)
666 unsigned int level = 0;
672 mutex_lock(&ns->base.lock);
673 fs_for_each_prio(p, ns) {
679 mutex_unlock(&ns->base.lock);
681 fs_get_parent(prio, ns);
683 WARN_ON(prio->base.type != FS_TYPE_PRIO);
685 return level + _alloc_new_level(prio, ns);
688 /* Called under lock of priority, hence locking all upper objects */
/*
 * Compute the level for a new flow table inside @prio: scan existing
 * namespaces/tables in @prio from the end; the first flow table found
 * fixes the base level, otherwise recurse into the parent namespace.
 */
689 static unsigned int _alloc_new_level(struct fs_prio *prio,
690 struct mlx5_flow_namespace *match)
692 struct mlx5_flow_namespace *ns;
694 unsigned int level = 0;
699 mutex_lock(&prio->base.lock);
700 fs_for_each_ns_or_ft_reverse(it, prio) {
701 if (it->type == FS_TYPE_NAMESPACE) {
707 mutex_lock(&ns->base.lock);
708 fs_for_each_prio(p, ns)
710 mutex_unlock(&ns->base.lock);
715 struct mlx5_flow_table *ft;
/* New table goes one level below the deepest existing one. */
718 mutex_unlock(&prio->base.lock);
719 return level + ft->level + 1;
723 fs_get_parent(ns, prio);
724 mutex_unlock(&prio->base.lock);
725 return __alloc_new_level(ns, prio) + level;
/* Public entry point: compute the level for a new table in @prio. */
728 static unsigned int alloc_new_level(struct fs_prio *prio)
730 return _alloc_new_level(prio, NULL);
/*
 * After creating @ft, make it the root flow table of @root if its level
 * is lower (closer to the wire) than the current root's.
 */
733 static int update_root_ft_create(struct mlx5_flow_root_namespace *root,
734 struct mlx5_flow_table *ft)
737 int min_level = INT_MAX;
740 min_level = root->root_ft->level;
742 if (ft->level < min_level)
743 err = mlx5_cmd_update_root_ft(root->dev, ft->type,
749 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
/*
 * Core flow-table creation: allocate the object, pick a level, create
 * the table in firmware (size rounded up to a power of two with two
 * slots reserved for star rules), create the star rule, possibly update
 * the root table, and finally add the node to the tree with a generated
 * name when none was supplied.
 */
757 static struct mlx5_flow_table *_create_ft_common(struct mlx5_flow_namespace *ns,
759 struct fs_prio *fs_prio,
763 struct mlx5_flow_table *ft;
768 struct mlx5_flow_root_namespace *root =
769 find_root(&ns->base);
772 printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of namespace %s", ns->base.name);
773 return ERR_PTR(-ENODEV);
776 if (fs_prio->num_ft == fs_prio->max_ft)
777 return ERR_PTR(-ENOSPC);
779 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
781 return ERR_PTR(-ENOMEM);
783 fs_init_node(&ft->base, 1);
784 INIT_LIST_HEAD(&ft->fgs);
786 /* Temporarily WA until we expose the level set in the API */
787 if (root->table_type == FS_FT_ESW_EGRESS_ACL ||
788 root->table_type == FS_FT_ESW_INGRESS_ACL)
791 ft->level = alloc_new_level(fs_prio);
793 ft->base.type = FS_TYPE_FLOW_TABLE;
795 ft->type = root->table_type;
796 /*Two entries are reserved for star rules*/
797 ft_size = roundup_pow_of_two(max_fte + 2);
798 /*User isn't aware to those rules*/
799 ft->max_fte = ft_size - 2;
800 log_table_sz = ilog2(ft_size);
801 err = mlx5_cmd_fs_create_ft(root->dev, ft->vport, ft->type,
802 ft->level, log_table_sz, &ft->id);
806 err = create_star_rule(ft, fs_prio);
/* Only NIC RX roots with the modify_root cap track a root table. */
810 if ((root->table_type == FS_FT_NIC_RX) && MLX5_CAP_FLOWTABLE(root->dev,
811 flow_table_properties_nic_receive.modify_root)) {
812 err = update_root_ft_create(root, ft);
814 goto destroy_star_rule;
817 if (!name || !strlen(name)) {
818 snprintf(gen_name, 20, "flow_table_%u", ft->id);
819 _fs_add_node(&ft->base, gen_name, &fs_prio->base);
821 _fs_add_node(&ft->base, name, &fs_prio->base);
823 list_add_tail(&ft->base.list, &fs_prio->objs);
/* Error unwinding in reverse creation order. */
829 destroy_star_rule(ft, fs_prio);
831 mlx5_cmd_fs_destroy_ft(root->dev, ft->vport, ft->type, ft->id);
/* Look up the fs_prio for @prio in @ns, then create the table in it. */
837 static struct mlx5_flow_table *create_ft_common(struct mlx5_flow_namespace *ns,
843 struct fs_prio *fs_prio = NULL;
844 fs_prio = find_prio(ns, prio);
846 return ERR_PTR(-EINVAL);
848 return _create_ft_common(ns, vport, fs_prio, max_fte, name);
852 static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
853 struct list_head *start);
855 static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
856 struct list_head *start);
/*
 * Shared-priority path: reuse the priority's existing table (if any),
 * bumping its share refcount, instead of creating a new one.
 */
858 static struct mlx5_flow_table *mlx5_create_autogrouped_shared_flow_table(struct fs_prio *fs_prio)
860 struct mlx5_flow_table *ft;
862 ft = find_first_ft_in_prio(fs_prio, &fs_prio->objs);
864 ft->shared_refcount++;
/*
 * Public API: create (or, for a shared priority, reuse) a flow table
 * whose groups are managed automatically (autogroup.active = true,
 * at most @max_num_groups groups).
 */
871 struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
874 int num_flow_table_entries,
877 struct mlx5_flow_table *ft = NULL;
878 struct fs_prio *fs_prio;
881 fs_prio = find_prio(ns, prio);
883 return ERR_PTR(-EINVAL);
885 is_shared_prio = fs_prio->flags & MLX5_CORE_FS_PRIO_SHARED;
886 if (is_shared_prio) {
887 mutex_lock(&fs_prio->shared_lock);
888 ft = mlx5_create_autogrouped_shared_flow_table(fs_prio);
894 ft = create_ft_common(ns, 0, prio, num_flow_table_entries,
899 ft->autogroup.active = true;
900 ft->autogroup.max_types = max_num_groups;
902 ft->shared_refcount = 1;
906 mutex_unlock(&fs_prio->shared_lock);
909 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
/* Public API: create a flow table bound to a specific vport. */
911 struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
915 int num_flow_table_entries)
917 return create_ft_common(ns, vport, prio, num_flow_table_entries, name);
919 EXPORT_SYMBOL(mlx5_create_vport_flow_table);
/* Public API: create a flow table on vport 0 (the host port). */
921 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
924 int num_flow_table_entries)
926 return create_ft_common(ns, 0, prio, num_flow_table_entries, name);
928 EXPORT_SYMBOL(mlx5_create_flow_table);
/*
 * Firmware-side teardown of a flow table, invoked from cmd_remove_node()
 * when the table's last reference is dropped.
 */
930 static void _fs_del_ft(struct mlx5_flow_table *ft)
933 struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
934 struct fs_prio *prio;
936 err = mlx5_cmd_fs_destroy_ft(dev, ft->vport, ft->type, ft->id);
938 mlx5_core_warn(dev, "flow steering can't destroy ft %s\n",
941 fs_get_parent(prio, ft);
/*
 * If @ft is the current root flow table, promote the next table in the
 * chain (same prio first, then the following prios) to root before @ft
 * goes away; a NULL successor clears the root.
 */
945 static int update_root_ft_destroy(struct mlx5_flow_root_namespace *root,
946 struct mlx5_flow_table *ft)
949 struct fs_prio *prio;
950 struct mlx5_flow_table *next_ft = NULL;
951 struct mlx5_flow_table *put_ft = NULL;
953 if (root->root_ft != ft)
956 fs_get_parent(prio, ft);
957 /*Assuming objs containis only flow tables and
958 * flow tables are sorted by level.
960 if (!list_is_last(&ft->base.list, &prio->objs)) {
961 next_ft = list_next_entry(ft, base.list);
963 next_ft = find_next_ft(prio);
968 err = mlx5_cmd_update_root_ft(root->dev, next_ft->type,
971 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
975 root->root_ft = next_ft;
/* Balance the reference taken by find_next_ft(). */
978 fs_put(&put_ft->base);
983 /*Objects in the same prio are destroyed in the reverse order they were createrd*/
/*
 * Public API: destroy a flow table.  For shared priorities only the
 * last share reference actually destroys; otherwise the root table is
 * repointed, the star rule removed, and the node torn down while the
 * priority lock is held.
 */
984 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
987 struct fs_prio *prio;
988 struct mlx5_flow_root_namespace *root;
991 fs_get_parent(prio, ft);
992 root = find_root(&prio->base);
995 printf("mlx5_core: ERR: ""mlx5: flow steering failed to find root of priority %s", prio->base.name);
999 is_shared_prio = prio->flags & MLX5_CORE_FS_PRIO_SHARED;
1000 if (is_shared_prio) {
1001 mutex_lock(&prio->shared_lock);
1002 if (ft->shared_refcount > 1) {
1003 --ft->shared_refcount;
1005 mutex_unlock(&prio->shared_lock);
1010 mutex_lock(&prio->base.lock);
1011 mutex_lock(&ft->base.lock);
1013 err = update_root_ft_destroy(root, ft);
1017 /* delete two last entries */
1018 destroy_star_rule(ft, prio);
1020 mutex_unlock(&ft->base.lock);
1021 fs_remove_node_parent_locked(&ft->base);
1022 mutex_unlock(&prio->base.lock);
1024 mutex_unlock(&prio->shared_lock);
/* Error path: release the locks in reverse acquisition order. */
1029 mutex_unlock(&ft->base.lock);
1030 mutex_unlock(&prio->base.lock);
1032 mutex_unlock(&prio->shared_lock);
1036 EXPORT_SYMBOL(mlx5_destroy_flow_table);
/*
 * Create a flow group in firmware and in the tree, inserted after
 * @prev in the table's (start_index-sorted) group list; the generated
 * node name is "group_<fw id>".
 */
1038 static struct mlx5_flow_group *fs_create_fg(struct mlx5_core_dev *dev,
1039 struct mlx5_flow_table *ft,
1040 struct list_head *prev,
1044 struct mlx5_flow_group *fg;
1046 unsigned int end_index;
1049 fg = fs_alloc_fg(fg_in);
1053 end_index = fg->start_index + fg->max_ftes - 1;
1054 err = mlx5_cmd_fs_create_fg(dev, fg_in,
1055 ft->vport, ft->type, ft->id,
1060 mutex_lock(&ft->base.lock);
1061 if (ft->autogroup.active)
1062 ft->autogroup.num_types++;
1064 snprintf(name, sizeof(name), "group_%u", fg->id);
1065 /*Add node to tree*/
1066 fs_add_node(&fg->base, &ft->base, name, refcount);
1067 /*Add node to group list*/
1068 list_add(&fg->base.list, prev);
1069 mutex_unlock(&ft->base.lock);
1075 return ERR_PTR(err);
/*
 * Public API: explicit group creation; refused (-EPERM) on tables whose
 * groups are managed automatically.
 */
1078 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1081 struct mlx5_flow_group *fg;
1082 struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
1085 return ERR_PTR(-ENODEV);
1087 if (ft->autogroup.active)
1088 return ERR_PTR(-EPERM);
1090 fg = fs_create_fg(dev, ft, ft->fgs.prev, in, 1);
1094 EXPORT_SYMBOL(mlx5_create_flow_group);
1096 /*Group is destoyed when all the rules in the group were removed*/
/*
 * Firmware-side teardown of a flow group; also shrinks the parent
 * table's autogroup count when applicable.
 */
1097 static void fs_del_fg(struct mlx5_flow_group *fg)
1099 struct mlx5_flow_table *parent_ft;
1100 struct mlx5_core_dev *dev;
1102 fs_get_parent(parent_ft, fg);
1103 dev = fs_get_dev(&parent_ft->base);
1106 if (parent_ft->autogroup.active)
1107 parent_ft->autogroup.num_types--;
1109 if (mlx5_cmd_fs_destroy_fg(dev, parent_ft->vport,
1111 parent_ft->id, fg->id))
1112 mlx5_core_warn(dev, "flow steering can't destroy fg\n");
/* Public API: remove a flow group node (synchronous). */
1115 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
1117 fs_remove_node(&fg->base);
1119 EXPORT_SYMBOL(mlx5_destroy_flow_group);
/*
 * Byte-wise masked comparison: return true iff val1 and val2 agree on
 * every bit set in mask, over the first `size` bytes.
 */
static bool _fs_match_exact_val(void *mask, void *val1, void *val2, size_t size)
{
	const unsigned char *m = mask;
	const unsigned char *a = val1;
	const unsigned char *b = val2;
	size_t i;

	/* TODO: optimize by comparing 64bits when possible */
	for (i = 0; i < size; i++) {
		if ((a[i] & m[i]) != (b[i] & m[i]))
			return false;
	}

	return true;
}
/*
 * Compare two fte_match_param values under a group mask, section by
 * section: only the criteria enabled in the mask (outer headers, misc
 * parameters, inner headers) are compared, each through
 * _fs_match_exact_val() with the corresponding mask bytes.
 */
1134 bool fs_match_exact_val(struct mlx5_core_fs_mask *mask,
1135 void *val1, void *val2)
1137 if (mask->match_criteria_enable &
1138 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
1139 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
1140 val1, outer_headers);
1141 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
1142 val2, outer_headers);
1143 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
1144 mask->match_criteria, outer_headers);
1146 if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
1147 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
1151 if (mask->match_criteria_enable &
1152 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
1153 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
1154 val1, misc_parameters);
1155 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
1156 val2, misc_parameters);
1157 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
1158 mask->match_criteria, misc_parameters);
1160 if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
1161 MLX5_ST_SZ_BYTES(fte_match_set_misc)))
1164 if (mask->match_criteria_enable &
1165 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
1166 void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
1167 val1, inner_headers);
1168 void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
1169 val2, inner_headers);
1170 void *fte_mask = MLX5_ADDR_OF(fte_match_param,
1171 mask->match_criteria, inner_headers);
1173 if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
1174 MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
1180 bool fs_match_exact_mask(u8 match_criteria_enable1,
1181 u8 match_criteria_enable2,
1182 void *mask1, void *mask2)
1184 return match_criteria_enable1 == match_criteria_enable2 &&
1185 !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
1188 static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
1189 struct list_head *start);
/*
 * Scan @prio backwards from @start for the first flow table, descending
 * into nested namespaces; caller holds the prio lock.
 */
1191 static struct mlx5_flow_table *_find_first_ft_in_prio_reverse(struct fs_prio *prio,
1192 struct list_head *start)
1194 struct fs_base *it = container_of(start, struct fs_base, list);
1199 fs_for_each_ns_or_ft_continue_reverse(it, prio) {
1200 struct mlx5_flow_namespace *ns;
1201 struct mlx5_flow_table *ft;
1203 if (it->type == FS_TYPE_FLOW_TABLE) {
1210 WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);
1212 ft = find_first_ft_in_ns_reverse(ns, &ns->prios);
/* Locked wrapper around _find_first_ft_in_prio_reverse(). */
1220 static struct mlx5_flow_table *find_first_ft_in_prio_reverse(struct fs_prio *prio,
1221 struct list_head *start)
1223 struct mlx5_flow_table *ft;
1228 mutex_lock(&prio->base.lock);
1229 ft = _find_first_ft_in_prio_reverse(prio, start);
1230 mutex_unlock(&prio->base.lock);
/*
 * Walk @ns's priorities backwards from @start, returning the first flow
 * table found in any of them (under the namespace lock).
 */
1235 static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
1236 struct list_head *start)
1238 struct fs_prio *prio;
1243 fs_get_obj(prio, container_of(start, struct fs_base, list));
1244 mutex_lock(&ns->base.lock);
1245 fs_for_each_prio_continue_reverse(prio, ns) {
1246 struct mlx5_flow_table *ft;
1248 ft = find_first_ft_in_prio_reverse(prio, &prio->objs);
1250 mutex_unlock(&ns->base.lock);
1254 mutex_unlock(&ns->base.lock);
1259 /* Returned a held ft, assumed curr is protected, assumed curr's parent is
/*
 * Find the flow table immediately preceding @curr in the global chain:
 * search backwards within @prio, then climb namespaces/priorities until
 * a table is found or the root is reached.
 */
1262 static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
1263 struct fs_prio *prio)
1265 struct mlx5_flow_table *ft = NULL;
1266 struct fs_base *curr_base;
1271 /* prio has either namespace or flow-tables, but not both */
1272 if (!list_empty(&prio->objs) &&
1273 list_first_entry(&prio->objs, struct mlx5_flow_table, base.list) !=
1277 while (!ft && prio) {
1278 struct mlx5_flow_namespace *ns;
1280 fs_get_parent(ns, prio);
1281 ft = find_first_ft_in_ns_reverse(ns, &prio->base.list);
1282 curr_base = &ns->base;
1283 fs_get_parent(prio, ns);
1286 ft = find_first_ft_in_prio_reverse(prio,
/*
 * Forward counterpart of _find_first_ft_in_prio_reverse(): scan @prio
 * from @start for the first flow table, descending into namespaces.
 */
1292 static struct mlx5_flow_table *_find_first_ft_in_prio(struct fs_prio *prio,
1293 struct list_head *start)
1295 struct fs_base *it = container_of(start, struct fs_base, list);
1300 fs_for_each_ns_or_ft_continue(it, prio) {
1301 struct mlx5_flow_namespace *ns;
1302 struct mlx5_flow_table *ft;
1304 if (it->type == FS_TYPE_FLOW_TABLE) {
1311 WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);
1313 ft = find_first_ft_in_ns(ns, &ns->prios);
/* Locked wrapper around _find_first_ft_in_prio(). */
1321 static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
1322 struct list_head *start)
1324 struct mlx5_flow_table *ft;
1329 mutex_lock(&prio->base.lock);
1330 ft = _find_first_ft_in_prio(prio, start);
1331 mutex_unlock(&prio->base.lock);
/*
 * Walk @ns's priorities forward from @start, returning the first flow
 * table found in any of them (under the namespace lock).
 */
1336 static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
1337 struct list_head *start)
1339 struct fs_prio *prio;
1344 fs_get_obj(prio, container_of(start, struct fs_base, list));
1345 mutex_lock(&ns->base.lock);
1346 fs_for_each_prio_continue(prio, ns) {
1347 struct mlx5_flow_table *ft;
1349 ft = find_first_ft_in_prio(prio, &prio->objs);
1351 mutex_unlock(&ns->base.lock);
1355 mutex_unlock(&ns->base.lock);
1360 /* returned a held ft, assumed curr is protected, assumed curr's parent is
/*
 * Find the next flow table after @prio in the global chain, climbing to
 * parent namespaces/priorities until one is found or the root is hit.
 */
1363 static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio)
1365 struct mlx5_flow_table *ft = NULL;
1366 struct fs_base *curr_base;
1368 while (!ft && prio) {
1369 struct mlx5_flow_namespace *ns;
1371 fs_get_parent(ns, prio);
1372 ft = find_first_ft_in_ns(ns, &prio->base.list);
1373 curr_base = &ns->base;
1374 fs_get_parent(prio, ns);
1377 ft = _find_first_ft_in_prio(prio, &curr_base->list);
1383 /* called under ft mutex lock */
/*
 * Automatic group creation for autogrouped tables: choose a group size
 * from max_fte / (max_types + 1), find the first index gap large enough
 * among the existing (start_index-sorted) groups, build the
 * create_flow_group_in mailbox and create the group via fs_create_fg().
 */
1384 static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
1385 u8 match_criteria_enable,
1386 u32 *match_criteria)
1388 unsigned int group_size;
1389 unsigned int candidate_index = 0;
1390 unsigned int candidate_group_num = 0;
1391 struct mlx5_flow_group *g;
1392 struct mlx5_flow_group *ret;
1393 struct list_head *prev = &ft->fgs;
1394 struct mlx5_core_dev *dev;
1396 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1397 void *match_criteria_addr;
1399 if (!ft->autogroup.active)
1400 return ERR_PTR(-ENOENT);
1402 dev = fs_get_dev(&ft->base);
1404 return ERR_PTR(-ENODEV);
1406 in = mlx5_vzalloc(inlen);
1408 mlx5_core_warn(dev, "failed to allocate inbox\n");
1409 return ERR_PTR(-ENOMEM);
1413 if (ft->autogroup.num_types < ft->autogroup.max_types)
1414 group_size = ft->max_fte / (ft->autogroup.max_types + 1);
1418 if (group_size == 0) {
1420 "flow steering can't create group size of 0\n");
1421 ret = ERR_PTR(-EINVAL);
1425 /* sorted by start_index */
1426 fs_for_each_fg(g, ft) {
1427 candidate_group_num++;
1428 if (candidate_index + group_size > g->start_index)
1429 candidate_index = g->start_index + g->max_ftes;
1432 prev = &g->base.list;
1435 if (candidate_index + group_size > ft->max_fte) {
1436 ret = ERR_PTR(-ENOSPC);
1440 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1441 match_criteria_enable);
1442 MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
1443 MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
1445 match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1446 in, match_criteria)
1447 memcpy(match_criteria_addr, match_criteria,
1448 MLX5_ST_SZ_BYTES(fte_match_param));
1450 ret = fs_create_fg(dev, ft, prev, in, 0);
/*
 * Climb from @node to the nearest ancestor namespace that has at least
 * one registered notifier; NULL when none exists.
 */
1456 static struct mlx5_flow_namespace *get_ns_with_notifiers(struct fs_base *node)
1458 struct mlx5_flow_namespace *ns = NULL;
1460 while (node && (node->type != FS_TYPE_NAMESPACE ||
1461 list_empty(&container_of(node, struct
1462 mlx5_flow_namespace,
1463 base)->list_notifiers)))
1464 node = node->parent;
1467 fs_get_obj(ns, node);
1473 /*Assumption- fte is locked*/
/*
 * Invoke every registered add_dst_cb notifier on the nearest namespace
 * with notifiers, passing each handler's previously stored per-rule
 * client data (looked up under the rule's clients_lock).
 */
1474 static void call_to_add_rule_notifiers(struct mlx5_flow_rule *dst,
1477 struct mlx5_flow_namespace *ns;
1478 struct mlx5_flow_handler *iter_handler;
1479 struct fs_client_priv_data *iter_client;
1481 bool is_new_rule = list_first_entry(&fte->dests,
1482 struct mlx5_flow_rule,
1486 ns = get_ns_with_notifiers(&fte->base);
1490 down_read(&ns->notifiers_rw_sem);
1491 list_for_each_entry(iter_handler, &ns->list_notifiers,
1493 if (iter_handler->add_dst_cb) {
1495 mutex_lock(&dst->clients_lock);
1496 list_for_each_entry(
1497 iter_client, &dst->clients_data, list) {
1498 if (iter_client->fs_handler == iter_handler) {
1499 data = iter_client->client_dst_data;
1503 mutex_unlock(&dst->clients_lock);
1504 err = iter_handler->add_dst_cb(dst,
1507 iter_handler->client_context);
1512 up_read(&ns->notifiers_rw_sem);
/*
 * Invoke every registered del_dst_cb notifier for rule @dst being removed.
 * Mirrors call_to_add_rule_notifiers(); ctx_changed tells the client the
 * fte no longer has any destinations.
 */
1515 static void call_to_del_rule_notifiers(struct mlx5_flow_rule *dst,
1518 struct mlx5_flow_namespace *ns;
1519 struct mlx5_flow_handler *iter_handler;
1520 struct fs_client_priv_data *iter_client;
1522 bool ctx_changed = (fte->dests_size == 0);
1524 ns = get_ns_with_notifiers(&fte->base);
1527 down_read(&ns->notifiers_rw_sem);
1528 list_for_each_entry(iter_handler, &ns->list_notifiers,
/* Look up this handler's per-rule private data under the clients lock. */
1531 mutex_lock(&dst->clients_lock);
1532 list_for_each_entry(iter_client, &dst->clients_data, list) {
1533 if (iter_client->fs_handler == iter_handler) {
1534 data = iter_client->client_dst_data;
1538 mutex_unlock(&dst->clients_lock);
1539 if (iter_handler->del_dst_cb) {
1540 iter_handler->del_dst_cb(dst, ctx_changed, data,
1541 iter_handler->client_context);
1544 up_read(&ns->notifiers_rw_sem);
1547 /* fte should not be deleted while calling this function */
/*
 * Allocate a new rule (destination) object, link it onto @fte's dests
 * list and push the updated fte to firmware via mlx5_cmd_fs_set_fte().
 * On firmware failure the rule is unlinked again and an ERR_PTR returned.
 */
1548 static struct mlx5_flow_rule *_fs_add_dst_fte(struct fs_fte *fte,
1549 struct mlx5_flow_group *fg,
1550 struct mlx5_flow_destination *dest)
1552 struct mlx5_flow_table *ft;
1553 struct mlx5_flow_rule *dst;
1556 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1558 return ERR_PTR(-ENOMEM);
1560 memcpy(&dst->dest_attr, dest, sizeof(*dest));
1561 dst->base.type = FS_TYPE_FLOW_DEST;
1562 INIT_LIST_HEAD(&dst->clients_data);
1563 mutex_init(&dst->clients_lock);
1564 fs_get_parent(ft, fg);
1565 /*Add dest to dests list- added as first element after the head*/
1566 list_add_tail(&dst->base.list, &fte->dests);
/* Re-program the whole fte (all destinations) in firmware. */
1568 err = mlx5_cmd_fs_set_fte(fs_get_dev(&ft->base),
1572 ft->id, fte->index, fg->id, fte->flow_tag,
1573 fte->action, fte->dests_size, &fte->dests);
/* Error paths: undo the list insertion before returning the error.
 * NOTE(review): intermediate lines are elided in this extraction, so the
 * exact unwind ordering (and the kfree of dst) is not visible here. */
1577 list_del(&dst->base.list);
1582 list_del(&dst->base.list);
1585 return ERR_PTR(err);
/*
 * Build a short human-readable name ("dest_<kind>_<id>") for a flow
 * destination; used as the tree-node name for the rule.
 * NOTE(review): no NULL check is visible between kzalloc() and the
 * snprintf() calls — confirm against the intact source whether allocation
 * failure is handled.
 */
1588 static char *get_dest_name(struct mlx5_flow_destination *dest)
1590 char *name = kzalloc(sizeof(char) * 20, GFP_KERNEL);
1592 switch (dest->type) {
1593 case MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE:
1594 snprintf(name, 20, "dest_%s_%u", "flow_table",
1597 case MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT:
1598 snprintf(name, 20, "dest_%s_%u", "vport",
1601 case MLX5_FLOW_CONTEXT_DEST_TYPE_TIR:
1602 snprintf(name, 20, "dest_%s_%u", "tir", dest->tir_num);
1610 /* assumed fg is locked */
/*
 * Find the first unused fte index inside flow group @fg and return it;
 * *prev is set to the list position after which a new fte should be
 * inserted to keep the list sorted by index.
 */
1611 static unsigned int fs_get_free_fg_index(struct mlx5_flow_group *fg,
1612 struct list_head **prev)
1615 unsigned int start = fg->start_index;
1620 /* assumed list is sorted by index */
1621 fs_for_each_fte(fte, fg) {
/* First gap in the consecutive index sequence is the free slot. */
1622 if (fte->index != start)
1626 *prev = &fte->base.list;
/*
 * Allocate a new fte at the first free index of @fg; *prev receives the
 * list position for insertion (see fs_get_free_fg_index()).
 */
1633 static struct fs_fte *fs_create_fte(struct mlx5_flow_group *fg,
1637 struct list_head **prev)
1642 index = fs_get_free_fg_index(fg, prev);
1643 fte = fs_alloc_fte(action, flow_tag, match_value, index);
/*
 * Attach @rule as a named child node of @fte in the fs tree, re-link it
 * onto the fte's dests list and fire the add-rule notifiers.
 */
1650 static void add_rule_to_tree(struct mlx5_flow_rule *rule,
1655 dest_name = get_dest_name(&rule->dest_attr);
1656 fs_add_node(&rule->base, &fte->base, dest_name, 1);
1657 /* re-add to list, since fs_add_node reset our list */
1658 list_add_tail(&rule->base.list, &fte->dests);
1660 call_to_add_rule_notifiers(rule, fte);
/*
 * Remove destination @dst from its fte: unlink it, re-program the fte in
 * firmware if destinations remain, and notify clients. Runs under the
 * parent flow group's lock.
 */
1663 static void fs_del_dst(struct mlx5_flow_rule *dst)
1665 struct mlx5_flow_table *ft;
1666 struct mlx5_flow_group *fg;
1669 struct mlx5_core_dev *dev = fs_get_dev(&dst->base);
1670 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
/* Scratch copy of the fte's match value for the firmware command. */
1675 match_value = mlx5_vzalloc(match_len);
1677 mlx5_core_warn(dev, "failed to allocate inbox\n");
1681 fs_get_parent(fte, dst);
1682 fs_get_parent(fg, fte);
1683 mutex_lock(&fg->base.lock);
1684 memcpy(match_value, fte->val, sizeof(fte->val));
1685 /* ft can't be changed as fg is locked */
1686 fs_get_parent(ft, fg);
1687 list_del(&dst->base.list);
/* Only re-program firmware if other destinations still exist; an empty
 * fte is handled elsewhere (fs_del_fte). */
1689 if (fte->dests_size) {
1690 err = mlx5_cmd_fs_set_fte(dev, ft->vport,
1691 &fte->status, match_value, ft->type,
1692 ft->id, fte->index, fg->id,
1693 fte->flow_tag, fte->action,
1694 fte->dests_size, &fte->dests);
1696 mlx5_core_warn(dev, "%s can't delete dst %s\n",
1697 __func__, dst->base.name);
1701 call_to_del_rule_notifiers(dst, fte);
1703 mutex_unlock(&fg->base.lock);
1704 kvfree(match_value);
/*
 * Delete flow table entry @fte from firmware
 * (mlx5_cmd_fs_delete_fte()); warns on failure.
 */
1707 static void fs_del_fte(struct fs_fte *fte)
1709 struct mlx5_flow_table *ft;
1710 struct mlx5_flow_group *fg;
1712 struct mlx5_core_dev *dev;
1714 fs_get_parent(fg, fte);
1715 fs_get_parent(ft, fg);
1717 dev = fs_get_dev(&ft->base);
1720 err = mlx5_cmd_fs_delete_fte(dev, ft->vport, &fte->status,
1721 ft->type, ft->id, fte->index);
1723 mlx5_core_warn(dev, "flow steering can't delete fte %s\n",
1729 /* assuming parent fg is locked */
1730 /* Add dst algorithm */
/*
 * Add a destination within flow group @fg: reuse an existing fte whose
 * match value / action / flow_tag are identical, otherwise create a new
 * fte (fails with -ENOSPC when the group is full) and attach the rule.
 */
1731 static struct mlx5_flow_rule *fs_add_dst_fg(struct mlx5_flow_group *fg,
1735 struct mlx5_flow_destination *dest)
1738 struct mlx5_flow_rule *dst;
1739 struct mlx5_flow_table *ft;
1740 struct list_head *prev;
1743 mutex_lock(&fg->base.lock);
/* Pass 1: try to merge into an existing identical fte. */
1744 fs_for_each_fte(fte, fg) {
1745 /* TODO: Check of size against PRM max size */
1746 mutex_lock(&fte->base.lock);
1747 if (fs_match_exact_val(&fg->mask, match_value, &fte->val) &&
1748 action == fte->action && flow_tag == fte->flow_tag) {
1749 dst = _fs_add_dst_fte(fte, fg, dest);
1750 mutex_unlock(&fte->base.lock);
1755 mutex_unlock(&fte->base.lock);
/* Pass 2: no matching fte — create a fresh one if the group has room. */
1758 fs_get_parent(ft, fg);
1759 if (fg->num_ftes == fg->max_ftes) {
1760 dst = ERR_PTR(-ENOSPC);
1764 fte = fs_create_fte(fg, match_value, action, flow_tag, &prev);
1769 dst = _fs_add_dst_fte(fte, fg, dest);
1777 snprintf(fte_name, sizeof(fte_name), "fte%u", fte->index);
1778 /* Add node to tree */
1779 fs_add_node(&fte->base, &fg->base, fte_name, 0);
1780 list_add(&fte->base.list, prev);
1782 add_rule_to_tree(dst, fte);
1784 mutex_unlock(&fg->base.lock);
/*
 * Add a destination at the flow-table level: find a flow group whose mask
 * exactly matches the requested criteria and delegate to fs_add_dst_fg();
 * if none fits (or the match fails with -ENOSPC), create an autogroup and
 * retry. The autogroup is removed again if the insertion fails.
 */
1788 static struct mlx5_flow_rule *fs_add_dst_ft(struct mlx5_flow_table *ft,
1789 u8 match_criteria_enable,
1790 u32 *match_criteria,
1792 u8 action, u32 flow_tag,
1793 struct mlx5_flow_destination *dest)
1795 /*? where dst_entry is allocated*/
1796 struct mlx5_flow_group *g;
1797 struct mlx5_flow_rule *dst;
1800 mutex_lock(&ft->base.lock);
1801 fs_for_each_fg(g, ft)
1802 if (fs_match_exact_mask(g->mask.match_criteria_enable,
1803 match_criteria_enable,
1804 g->mask.match_criteria,
/* Table lock is dropped before descending into the group. */
1806 mutex_unlock(&ft->base.lock);
1808 dst = fs_add_dst_fg(g, match_value,
1809 action, flow_tag, dest);
/* -ENOSPC in a matching group falls through to autogroup creation. */
1810 if (PTR_ERR(dst) && PTR_ERR(dst) != -ENOSPC)
1813 mutex_unlock(&ft->base.lock);
1815 g = create_autogroup(ft, match_criteria_enable, match_criteria);
1821 dst = fs_add_dst_fg(g, match_value,
1822 action, flow_tag, dest);
1824 /* Remove assumes refcount > 0 and autogroup creates a group
1825 * with a refcount = 0.
1828 fs_remove_node(&g->base);
/*
 * Public entry point: add a flow rule to table @ft. Takes the owning
 * namespace's dests semaphore for read so rule iteration/unregistration
 * (which takes it for write) is excluded while the rule is inserted.
 */
1837 struct mlx5_flow_rule *
1838 mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1839 u8 match_criteria_enable,
1840 u32 *match_criteria,
1844 struct mlx5_flow_destination *dest)
1846 struct mlx5_flow_rule *dst;
1847 struct mlx5_flow_namespace *ns;
1849 ns = get_ns_with_notifiers(&ft->base);
1851 down_read(&ns->dests_rw_sem);
1852 dst = fs_add_dst_ft(ft, match_criteria_enable, match_criteria,
1853 match_value, action, flow_tag, dest);
1855 up_read(&ns->dests_rw_sem);
1861 EXPORT_SYMBOL(mlx5_add_flow_rule);
/*
 * Public entry point: delete flow rule @dst by removing its tree node,
 * under the namespace's dests read semaphore (same exclusion scheme as
 * mlx5_add_flow_rule()).
 */
1863 void mlx5_del_flow_rule(struct mlx5_flow_rule *dst)
1865 struct mlx5_flow_namespace *ns;
1867 ns = get_ns_with_notifiers(&dst->base);
1869 down_read(&ns->dests_rw_sem);
1870 fs_remove_node(&dst->base);
1872 up_read(&ns->dests_rw_sem);
1874 EXPORT_SYMBOL(mlx5_del_flow_rule);
/* Tree-node names for each steering domain's root namespace, and the
 * default per-priority limits (max flow tables / max sub-namespaces). */
1876 #define MLX5_CORE_FS_ROOT_NS_NAME "root"
1877 #define MLX5_CORE_FS_ESW_EGRESS_ACL "esw_egress_root"
1878 #define MLX5_CORE_FS_ESW_INGRESS_ACL "esw_ingress_root"
1879 #define MLX5_CORE_FS_FDB_ROOT_NS_NAME "fdb_root"
1880 #define MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME "sniffer_rx_root"
1881 #define MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME "sniffer_tx_root"
1882 #define MLX5_CORE_FS_PRIO_MAX_FT 4
1883 #define MLX5_CORE_FS_PRIO_MAX_NS 1
/*
 * Allocate a priority node under namespace @ns, add it to the fs tree and
 * append it to ns->prios. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
1885 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
1886 unsigned prio, int max_ft,
1887 const char *name, u8 flags)
1889 struct fs_prio *fs_prio;
1891 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
1893 return ERR_PTR(-ENOMEM);
1895 fs_prio->base.type = FS_TYPE_PRIO;
1896 fs_add_node(&fs_prio->base, &ns->base, name, 1);
1897 fs_prio->max_ft = max_ft;
1898 fs_prio->max_ns = MLX5_CORE_FS_PRIO_MAX_NS;
1899 fs_prio->prio = prio;
1900 fs_prio->flags = flags;
1901 list_add_tail(&fs_prio->base.list, &ns->prios);
1902 INIT_LIST_HEAD(&fs_prio->objs);
1903 mutex_init(&fs_prio->shared_lock);
/*
 * Tear down dev->root_ns bottom-up: first the sub-priorities inside each
 * nested namespace, then the namespaces inside each top-level priority,
 * then the top-level priorities, and finally the root namespace node.
 */
1908 static void cleanup_root_ns(struct mlx5_core_dev *dev)
1910 struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
1911 struct fs_prio *iter_prio;
/* Stage 1: remove second-level priorities under nested namespaces. */
1917 fs_for_each_prio(iter_prio, &root_ns->ns) {
1918 struct mlx5_flow_namespace *iter_ns;
1920 fs_for_each_ns(iter_ns, iter_prio) {
1921 while (!list_empty(&iter_ns->prios)) {
1922 struct fs_base *iter_prio2 =
1923 list_first_entry(&iter_ns->prios,
1927 fs_remove_node(iter_prio2);
/* Stage 2: remove the namespaces themselves from each priority. */
1933 fs_for_each_prio(iter_prio, &root_ns->ns) {
1934 while (!list_empty(&iter_prio->objs)) {
1935 struct fs_base *iter_ns =
1936 list_first_entry(&iter_prio->objs,
1940 fs_remove_node(iter_ns);
/* Stage 3: remove the top-level priorities of the root namespace. */
1944 while (!list_empty(&root_ns->ns.prios)) {
1945 struct fs_base *iter_prio =
1946 list_first_entry(&root_ns->ns.prios,
1950 fs_remove_node(iter_prio);
/* Stage 4: drop the root namespace node and clear the device pointer. */
1953 fs_remove_node(&root_ns->ns.base);
1954 dev->root_ns = NULL;
/*
 * Tear down a root namespace known to contain at most one priority
 * (fdb/acl/sniffer roots): remove that priority, then the namespace node.
 */
1957 static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
1958 struct mlx5_flow_root_namespace *root_ns)
1960 struct fs_base *prio;
1965 if (!list_empty(&root_ns->ns.prios)) {
1966 prio = list_first_entry(&root_ns->ns.prios,
1969 fs_remove_node(prio);
1971 fs_remove_node(&root_ns->ns.base);
/*
 * Release all flow-steering namespaces of @dev; also used as the error
 * unwind path of mlx5_init_fs().
 */
1975 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
1977 cleanup_root_ns(dev);
1978 cleanup_single_prio_root_ns(dev, dev->sniffer_rx_root_ns);
1979 cleanup_single_prio_root_ns(dev, dev->sniffer_tx_root_ns);
1980 cleanup_single_prio_root_ns(dev, dev->fdb_root_ns);
1981 cleanup_single_prio_root_ns(dev, dev->esw_egress_root_ns);
1982 cleanup_single_prio_root_ns(dev, dev->esw_ingress_root_ns);
/*
 * Initialize the common fields of a flow namespace: type tag, the two
 * rw-semaphores (dests / notifiers) and the empty prio and notifier lists.
 */
1985 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
1988 ns->base.type = FS_TYPE_NAMESPACE;
1989 init_rwsem(&ns->dests_rw_sem);
1990 init_rwsem(&ns->notifiers_rw_sem);
1991 INIT_LIST_HEAD(&ns->prios);
1992 INIT_LIST_HEAD(&ns->list_notifiers);
/*
 * Allocate and initialize a root namespace of the given @table_type and
 * register it as a named top-level node in the fs tree.
 */
1997 static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
2002 struct mlx5_flow_root_namespace *root_ns;
2003 struct mlx5_flow_namespace *ns;
2005 /* create the root namespace */
2006 root_ns = mlx5_vzalloc(sizeof(*root_ns));
2011 root_ns->table_type = table_type;
2012 mutex_init(&root_ns->fs_chain_lock);
2015 fs_init_namespace(ns);
/* Root nodes have no parent in the tree. */
2016 fs_add_node(&ns->base, NULL, name, 1);
/*
 * Create the FDB (eswitch forwarding) root namespace with a single
 * priority allowing one flow table.
 */
2023 static int init_fdb_root_ns(struct mlx5_core_dev *dev)
2025 struct fs_prio *prio;
2027 dev->fdb_root_ns = create_root_ns(dev, FS_FT_FDB,
2028 MLX5_CORE_FS_FDB_ROOT_NS_NAME);
2029 if (!dev->fdb_root_ns)
2033 prio = fs_create_prio(&dev->fdb_root_ns->ns, 0, 1, "fdb_prio", 0);
2035 return PTR_ERR(prio);
2040 #define MAX_VPORTS 128
/*
 * Create the eswitch egress ACL root namespace with one priority sized
 * for one table per vport (MAX_VPORTS).
 */
2042 static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
2044 struct fs_prio *prio;
2046 dev->esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL,
2047 MLX5_CORE_FS_ESW_EGRESS_ACL);
2048 if (!dev->esw_egress_root_ns)
2052 prio = fs_create_prio(&dev->esw_egress_root_ns->ns, 0, MAX_VPORTS,
2053 "esw_egress_prio", 0);
2055 return PTR_ERR(prio);
/*
 * Create the eswitch ingress ACL root namespace; mirrors
 * init_egress_acl_root_ns().
 */
2060 static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
2062 struct fs_prio *prio;
2064 dev->esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL,
2065 MLX5_CORE_FS_ESW_INGRESS_ACL);
2066 if (!dev->esw_ingress_root_ns)
2070 prio = fs_create_prio(&dev->esw_ingress_root_ns->ns, 0, MAX_VPORTS,
2071 "esw_ingress_prio", 0);
2073 return PTR_ERR(prio);
/*
 * Create the sniffer RX root namespace with a single one-table priority.
 */
2078 static int init_sniffer_rx_root_ns(struct mlx5_core_dev *dev)
2080 struct fs_prio *prio;
2082 dev->sniffer_rx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_RX,
2083 MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME);
2084 if (!dev->sniffer_rx_root_ns)
2088 prio = fs_create_prio(&dev->sniffer_rx_root_ns->ns, 0, 1,
2091 return PTR_ERR(prio);
/*
 * Create the sniffer TX root namespace; mirrors init_sniffer_rx_root_ns().
 */
2097 static int init_sniffer_tx_root_ns(struct mlx5_core_dev *dev)
2099 struct fs_prio *prio;
2101 dev->sniffer_tx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_TX,
2102 MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME);
2103 if (!dev->sniffer_tx_root_ns)
2107 prio = fs_create_prio(&dev->sniffer_tx_root_ns->ns, 0, 1,
2110 return PTR_ERR(prio);
/*
 * Allocate a (non-root) namespace under priority @prio, add it to the fs
 * tree and to the priority's object list. Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
2115 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2118 struct mlx5_flow_namespace *ns;
2120 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2122 return ERR_PTR(-ENOMEM);
2124 fs_init_namespace(ns);
2125 fs_add_node(&ns->base, &prio->base, name, 1);
2126 list_add_tail(&ns->base.list, &prio->objs);
2131 #define FLOW_TABLE_BIT_SZ 1
/* Extract a single flow-table capability bit at @offset from the current
 * HCA capability words. Masking with FLOW_TABLE_BIT_SZ (== 1) is valid
 * only because each capability here is one bit wide. */
2132 #define GET_FLOW_TABLE_CAP(dev, offset) \
2133 ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \
2135 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
/*
 * Return false as soon as any capability bit listed in @caps is not set
 * on the device.
 */
2137 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2141 for (i = 0; i < caps->arr_sz; i++) {
2142 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
/*
 * Recursively materialize the static init_tree_node description into live
 * fs objects: a PRIO node becomes an fs_prio (skipped when the device
 * lacks the required level/caps), a NAMESPACE node becomes a namespace.
 * Children are then created under the new node.
 */
2148 static int _init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
2149 struct init_tree_node *node, struct fs_base *base_parent,
2150 struct init_tree_node *tree_parent)
2152 struct mlx5_flow_namespace *fs_ns;
2153 struct fs_prio *fs_prio;
2155 struct fs_base *base;
2159 if (node->type == FS_TYPE_PRIO) {
/* Skip priorities the device cannot support. */
2160 if ((node->min_ft_level > max_ft_level) ||
2161 !has_required_caps(dev, &node->caps))
2164 fs_get_obj(fs_ns, base_parent);
/* Priority value is the node's index within its parent's children. */
2165 priority = node - tree_parent->children;
2166 fs_prio = fs_create_prio(fs_ns, priority,
2168 node->name, node->flags);
2169 if (IS_ERR(fs_prio)) {
2170 err = PTR_ERR(fs_prio);
2173 base = &fs_prio->base;
2174 } else if (node->type == FS_TYPE_NAMESPACE) {
2175 fs_get_obj(fs_prio, base_parent);
2176 fs_ns = fs_create_namespace(fs_prio, node->name);
2177 if (IS_ERR(fs_ns)) {
2178 err = PTR_ERR(fs_ns);
2181 base = &fs_ns->base;
/* Recurse into children with the freshly created node as parent. */
2185 for (i = 0; i < node->ar_size; i++) {
2186 err = _init_root_tree(dev, max_ft_level, &node->children[i], base,
/*
 * Top-level driver for _init_root_tree(): build every child of the static
 * root description under the root namespace @parent.
 */
2195 static int init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
2196 struct init_tree_node *node, struct fs_base *parent)
2199 struct mlx5_flow_namespace *fs_ns;
2202 fs_get_obj(fs_ns, parent);
2203 for (i = 0; i < node->ar_size; i++) {
2204 err = _init_root_tree(dev, max_ft_level,
2205 &node->children[i], &fs_ns->base, node);
2212 static int sum_max_ft_in_prio(struct fs_prio *prio);
/*
 * Sum the max_ft budget of every priority in @ns (mutually recursive with
 * sum_max_ft_in_prio()).
 */
2213 static int sum_max_ft_in_ns(struct mlx5_flow_namespace *ns)
2215 struct fs_prio *prio;
2218 fs_for_each_prio(prio, ns) {
2219 sum += sum_max_ft_in_prio(prio);
/*
 * Return @prio's own max_ft when it is a leaf; otherwise accumulate the
 * budgets of its nested namespaces (skipping actual flow tables).
 */
2224 static int sum_max_ft_in_prio(struct fs_prio *prio)
2228 struct mlx5_flow_namespace *ns;
2231 return prio->max_ft;
2233 fs_for_each_ns_or_ft(it, prio) {
2234 if (it->type == FS_TYPE_FLOW_TABLE)
2238 sum += sum_max_ft_in_ns(ns);
/*
 * Recompute flow-table budgets for every priority of @ns.
 * NOTE(review): as visible here the sum_max_ft_in_prio() result is
 * discarded; the lines that store it are elided in this extraction —
 * confirm against intact source.
 */
2244 static void set_max_ft(struct mlx5_flow_namespace *ns)
2246 struct fs_prio *prio;
2251 fs_for_each_prio(prio, ns)
2252 sum_max_ft_in_prio(prio);
/*
 * Create the NIC RX root namespace, populate it from the static root_fs
 * description (bounded by the device's max flow-table level) and compute
 * the per-priority table budgets.
 */
2255 static int init_root_ns(struct mlx5_core_dev *dev)
2257 int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
2258 flow_table_properties_nic_receive.
2261 dev->root_ns = create_root_ns(dev, FS_FT_NIC_RX,
2262 MLX5_CORE_FS_ROOT_NS_NAME);
2263 if (IS_ERR_OR_NULL(dev->root_ns))
2267 if (init_root_tree(dev, max_ft_level, &root_fs, &dev->root_ns->ns.base))
2270 set_max_ft(&dev->root_ns->ns);
/*
 * Return the match_criteria_enable bits of the flow group owning @rule
 * (rule -> fte -> flow group via two parent hops).
 */
2277 u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule)
2279 struct fs_base *pbase;
2280 struct mlx5_flow_group *fg;
2282 pbase = rule->base.parent;
2284 pbase = pbase->parent;
2287 fs_get_obj(fg, pbase);
2288 return fg->mask.match_criteria_enable;
/*
 * Copy the match value of the fte owning @rule into caller-provided
 * @match_value (must be at least sizeof(fte->val) bytes).
 */
2291 void mlx5_get_match_value(u32 *match_value,
2292 struct mlx5_flow_rule *rule)
2294 struct fs_base *pbase;
2297 pbase = rule->base.parent;
2299 fs_get_obj(fte, pbase);
2301 memcpy(match_value, fte->val, sizeof(fte->val));
/*
 * Copy the match criteria of the flow group owning @rule into
 * caller-provided @match_criteria.
 */
2304 void mlx5_get_match_criteria(u32 *match_criteria,
2305 struct mlx5_flow_rule *rule)
2307 struct fs_base *pbase;
2308 struct mlx5_flow_group *fg;
2310 pbase = rule->base.parent;
2312 pbase = pbase->parent;
2315 fs_get_obj(fg, pbase);
2316 memcpy(match_criteria, &fg->mask.match_criteria,
2317 sizeof(fg->mask.match_criteria));
/*
 * Initialize all flow-steering namespaces for @dev (NIC RX root only when
 * the nic_flow_table capability is present, then FDB, eswitch ACLs and
 * sniffers). On any failure the visible unwind calls mlx5_cleanup_fs().
 */
2320 int mlx5_init_fs(struct mlx5_core_dev *dev)
2324 if (MLX5_CAP_GEN(dev, nic_flow_table)) {
2325 err = init_root_ns(dev);
2330 err = init_fdb_root_ns(dev);
2334 err = init_egress_acl_root_ns(dev);
2338 err = init_ingress_acl_root_ns(dev);
2342 err = init_sniffer_tx_root_ns(dev);
2346 err = init_sniffer_rx_root_ns(dev);
2352 mlx5_cleanup_fs(dev);
/*
 * Resolve a namespace-type enum to a live namespace: the dedicated roots
 * (FDB, ACLs, sniffers) are returned directly; the NIC RX types map to a
 * priority index inside dev->root_ns and return that priority's first
 * namespace object.
 * NOTE(review): `static` on the local fs_prio pointer makes it shared
 * across calls — looks unintentional and non-thread-safe; confirm against
 * intact source.
 */
2356 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2357 enum mlx5_flow_namespace_type type)
2359 struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
2361 static struct fs_prio *fs_prio;
2362 struct mlx5_flow_namespace *ns;
2365 case MLX5_FLOW_NAMESPACE_BYPASS:
2368 case MLX5_FLOW_NAMESPACE_KERNEL:
2371 case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2374 case MLX5_FLOW_NAMESPACE_FDB:
2375 if (dev->fdb_root_ns)
2376 return &dev->fdb_root_ns->ns;
2379 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2380 if (dev->esw_egress_root_ns)
2381 return &dev->esw_egress_root_ns->ns;
2384 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2385 if (dev->esw_ingress_root_ns)
2386 return &dev->esw_ingress_root_ns->ns;
2389 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2390 if (dev->sniffer_rx_root_ns)
2391 return &dev->sniffer_rx_root_ns->ns;
2394 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2395 if (dev->sniffer_tx_root_ns)
2396 return &dev->sniffer_tx_root_ns->ns;
/* NIC RX path: locate the priority and return its first namespace. */
2406 fs_prio = find_prio(&root_ns->ns, prio);
2410 ns = list_first_entry(&fs_prio->objs,
2416 EXPORT_SYMBOL(mlx5_get_flow_namespace);
/*
 * Attach (or update) @fs_handler's private data on @rule. If the handler
 * already has an entry on rule->clients_data it is overwritten in place;
 * otherwise a new fs_client_priv_data entry is allocated and linked.
 * All list manipulation is done under rule->clients_lock.
 */
2419 int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule,
2420 struct mlx5_flow_handler *fs_handler,
2423 struct fs_client_priv_data *priv_data;
2425 mutex_lock(&rule->clients_lock);
2426 /*Check that hanlder isn't exists in the list already*/
2427 list_for_each_entry(priv_data, &rule->clients_data, list) {
2428 if (priv_data->fs_handler == fs_handler) {
2429 priv_data->client_dst_data = client_data;
2433 priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
/* Allocation failed: drop the lock before returning. */
2435 mutex_unlock(&rule->clients_lock);
2439 priv_data->client_dst_data = client_data;
2440 priv_data->fs_handler = fs_handler;
2441 list_add(&priv_data->list, &rule->clients_data);
2444 mutex_unlock(&rule->clients_lock);
/*
 * rule_event_fn callback: remove @context (a mlx5_flow_handler) from
 * @rule's client list. Uses the _safe iterator because the matched entry
 * is unlinked during iteration.
 */
2449 static int remove_from_clients(struct mlx5_flow_rule *rule,
2454 struct fs_client_priv_data *iter_client;
2455 struct fs_client_priv_data *temp_client;
2456 struct mlx5_flow_handler *handler = (struct
2457 mlx5_flow_handler*)context;
2459 mutex_lock(&rule->clients_lock);
2460 list_for_each_entry_safe(iter_client, temp_client,
2461 &rule->clients_data, list) {
2462 if (iter_client->fs_handler == handler) {
2463 list_del(&iter_client->list);
2468 mutex_unlock(&rule->clients_lock);
/*
 * Register add/del rule callbacks on the namespace selected by @ns_type.
 * Returns the new handler, ERR_PTR(-EINVAL) for an unknown namespace, or
 * ERR_PTR(-ENOMEM) on allocation failure. The handler is appended to the
 * namespace's notifier list under the write side of notifiers_rw_sem.
 */
2473 struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
2474 enum mlx5_flow_namespace_type ns_type,
2475 rule_event_fn add_cb,
2476 rule_event_fn del_cb,
2479 struct mlx5_flow_namespace *ns;
2480 struct mlx5_flow_handler *handler;
2482 ns = mlx5_get_flow_namespace(dev, ns_type);
2484 return ERR_PTR(-EINVAL);
2486 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
2488 return ERR_PTR(-ENOMEM);
2490 handler->add_dst_cb = add_cb;
2491 handler->del_dst_cb = del_cb;
2492 handler->client_context = context;
2494 down_write(&ns->notifiers_rw_sem);
2495 list_add_tail(&handler->list, &ns->list_notifiers);
2496 up_write(&ns->notifiers_rw_sem);
/* Forward declaration: defined below, mutually used by the prio iterator. */
2501 static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
2502 rule_event_fn add_rule_cb,
/*
 * Unregister @handler: strip its private data from every rule in the
 * namespace and unlink it from the notifier list. Both rw-semaphores are
 * taken for write so no rule add/del or notifier callback can race.
 */
2505 void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler)
2507 struct mlx5_flow_namespace *ns = handler->ns;
2509 /*Remove from dst's clients*/
2510 down_write(&ns->dests_rw_sem);
2511 down_write(&ns->notifiers_rw_sem);
2512 iterate_rules_in_ns(ns, remove_from_clients, handler);
2513 list_del(&handler->list);
2514 up_write(&ns->notifiers_rw_sem);
2515 up_write(&ns->dests_rw_sem);
/*
 * Invoke @add_rule_cb on every rule of every fte of every group in @ft,
 * locking table -> group -> fte top-down. Each rule's refcount is taken
 * around the callback (fs_get / fs_put_parent_locked).
 */
2519 static void iterate_rules_in_ft(struct mlx5_flow_table *ft,
2520 rule_event_fn add_rule_cb,
2523 struct mlx5_flow_group *iter_fg;
2524 struct fs_fte *iter_fte;
2525 struct mlx5_flow_rule *iter_rule;
2529 mutex_lock(&ft->base.lock);
2530 fs_for_each_fg(iter_fg, ft) {
2531 mutex_lock(&iter_fg->base.lock);
2532 fs_for_each_fte(iter_fte, iter_fg) {
2533 mutex_lock(&iter_fte->base.lock);
2535 fs_for_each_dst(iter_rule, iter_fte) {
2536 fs_get(&iter_rule->base);
2537 err = add_rule_cb(iter_rule,
2541 fs_put_parent_locked(&iter_rule->base);
/* Only the first rule of an fte counts as "new" for the callback. */
2544 is_new_rule = false;
2546 mutex_unlock(&iter_fte->base.lock);
2550 mutex_unlock(&iter_fg->base.lock);
2554 mutex_unlock(&ft->base.lock);
/*
 * Walk every object under priority @prio: flow tables are iterated via
 * iterate_rules_in_ft(), nested namespaces recurse via
 * iterate_rules_in_ns().
 */
2557 static void iterate_rules_in_prio(struct fs_prio *prio,
2558 rule_event_fn add_rule_cb,
2563 mutex_lock(&prio->base.lock);
2564 fs_for_each_ns_or_ft(it, prio) {
2565 if (it->type == FS_TYPE_FLOW_TABLE) {
2566 struct mlx5_flow_table *ft;
2569 iterate_rules_in_ft(ft, add_rule_cb, context);
2571 struct mlx5_flow_namespace *ns;
2574 iterate_rules_in_ns(ns, add_rule_cb, context);
2577 mutex_unlock(&prio->base.lock);
/*
 * Recurse over each priority of namespace @ns (mutually recursive with
 * iterate_rules_in_prio()), holding the namespace node lock.
 */
2580 static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
2581 rule_event_fn add_rule_cb,
2584 struct fs_prio *iter_prio;
2586 mutex_lock(&ns->base.lock);
2587 fs_for_each_prio(iter_prio, ns) {
2588 iterate_rules_in_prio(iter_prio, add_rule_cb, context);
2590 mutex_unlock(&ns->base.lock);
/*
 * Public entry point: replay @add_rule_cb over every existing rule of
 * @ns. Takes dests_rw_sem for write (no concurrent add/del) and
 * notifiers_rw_sem for read during the walk.
 */
2593 void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
2594 rule_event_fn add_rule_cb,
2597 down_write(&ns->dests_rw_sem);
2598 down_read(&ns->notifiers_rw_sem);
2599 iterate_rules_in_ns(ns, add_rule_cb, context);
2600 up_read(&ns->notifiers_rw_sem);
2601 up_write(&ns->dests_rw_sem);
/*
 * Free a rules list built by get_roce_flow_rules(): unlink and release
 * each node (safe iteration since entries are deleted in the loop).
 */
2605 void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list)
2607 struct mlx5_flow_rule_node *iter_node;
2608 struct mlx5_flow_rule_node *temp_node;
2610 list_for_each_entry_safe(iter_node, temp_node, &rules_list->head, list) {
2611 list_del(&iter_node->list);
2618 #define ROCEV1_ETHERTYPE 0x8915
/*
 * Append a RoCE v1 match rule (outer-header ethertype 0x8915) to
 * @rules_list. Returns -ENOMEM on allocation failure (error path elided
 * in this extraction).
 */
2619 static int set_rocev1_rules(struct list_head *rules_list)
2621 struct mlx5_flow_rule_node *rocev1_rule;
2623 rocev1_rule = kzalloc(sizeof(*rocev1_rule), GFP_KERNEL);
2627 rocev1_rule->match_criteria_enable =
2628 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2629 MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_criteria, ethertype,
2631 MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_value, ethertype,
2634 list_add_tail(&rocev1_rule->list, rules_list);
2639 #define ROCEV2_UDP_PORT 4791
/*
 * Append the two RoCE v2 match rules (IPv4/UDP and IPv6/UDP, destination
 * port 4791) to @rules_list. Both nodes are allocated up front; error
 * unwind lines are elided in this extraction.
 */
2640 static int set_rocev2_rules(struct list_head *rules_list)
2642 struct mlx5_flow_rule_node *ipv4_rule;
2643 struct mlx5_flow_rule_node *ipv6_rule;
2645 ipv4_rule = kzalloc(sizeof(*ipv4_rule), GFP_KERNEL);
2649 ipv6_rule = kzalloc(sizeof(*ipv6_rule), GFP_KERNEL);
/* IPv4 variant: ethertype + ip_protocol(UDP) + udp_dport criteria. */
2655 ipv4_rule->match_criteria_enable =
2656 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2657 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ethertype,
2659 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ethertype,
2661 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ip_protocol,
2663 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ip_protocol,
2665 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, udp_dport,
2667 MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, udp_dport,
/* IPv6 variant: same structure with the IPv6 ethertype. */
2670 ipv6_rule->match_criteria_enable =
2671 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
2672 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ethertype,
2674 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ethertype,
2676 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ip_protocol,
2678 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ip_protocol,
2680 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, udp_dport,
2682 MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, udp_dport,
2685 list_add_tail(&ipv4_rule->list, rules_list);
2686 list_add_tail(&ipv6_rule->list, rules_list);
2692 struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode)
2695 struct mlx5_flow_rules_list *rules_list =
2696 kzalloc(sizeof(*rules_list), GFP_KERNEL);
2701 INIT_LIST_HEAD(&rules_list->head);
2703 if (roce_mode & MLX5_ROCE_VERSION_1_CAP) {
2704 err = set_rocev1_rules(&rules_list->head);
2708 if (roce_mode & MLX5_ROCE_VERSION_2_CAP)
2709 err = set_rocev2_rules(&rules_list->head);
2716 mlx5_del_flow_rules_list(rules_list);