2 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <linux/list.h>
31 #include <dev/mlx5/fs.h>
33 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
/* Pending action for a tracked Ethernet address (consumed by mlx5e_execute_action). */
49 MLX5E_ACTION_NONE = 0,

/*
 * One entry of the driver's Ethernet-address hash table.  Couples the
 * flow-table state (struct mlx5e_eth_addr_info, incl. installed rules and
 * tt_vec bitmask) with the queued add/del action.
 */
54 struct mlx5e_eth_addr_hash_node {
55 LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
57 struct mlx5e_eth_addr_info ai;

/* Map a 6-byte Ethernet address to a bucket index of the address hash. */
61 mlx5e_hash_eth_addr(const u8 * addr)

/*
 * Queue "addr" for installation.  If the address is already hashed, a
 * pending MLX5E_ACTION_DEL is cancelled (reset to NONE); otherwise a new
 * zeroed node is allocated with M_NOWAIT and inserted with action ADD.
 * NOTE(review): M_NOWAIT can fail — the allocation-failure branch is not
 * visible here; confirm it bails out before the ether_addr_copy().
 */
67 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
70 struct mlx5e_eth_addr_hash_node *hn;
71 int ix = mlx5e_hash_eth_addr(addr);

73 LIST_FOREACH(hn, &hash[ix], hlist) {
74 if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
75 if (hn->action == MLX5E_ACTION_DEL)
76 hn->action = MLX5E_ACTION_NONE;
81 hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
85 ether_addr_copy(hn->ai.addr, addr);
86 hn->action = MLX5E_ACTION_ADD;
88 LIST_INSERT_HEAD(&hash[ix], hn, hlist);

/* Unlink a node from its hash bucket; presumably the node is freed by the
 * caller path — confirm against the not-visible remainder of this function. */
92 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
94 LIST_REMOVE(hn, hlist);
/*
 * Tear down every flow rule installed for address info "ai".  Each bit in
 * ai->tt_vec records that the matching ft_rule[] slot holds a live rule,
 * so only set bits are deleted.  Removal order is the reverse of the
 * installation order used by mlx5e_add_eth_addr_rule_sub().
 */
99 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
100 struct mlx5e_eth_addr_info *ai)
102 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
103 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

105 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
106 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

108 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
109 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

111 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
112 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

114 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
115 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

117 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
118 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

120 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
121 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

123 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
124 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

126 if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
127 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

129 if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
130 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

132 if (ai->tt_vec & (1 << MLX5E_TT_ANY))
133 mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);

/* tt_vec is cleared after deletion so a second call is a harmless no-op. */
135 /* ensure the rules are not freed again */
/*
 * Classify an Ethernet address for traffic-type selection.
 * Non-multicast addresses are handled first (branch body not visible —
 * presumably returns a unicast classification; confirm).  First-octet
 * 0x01 marks an IPv4 multicast MAC (01:00:5e:...), 0x33 an IPv6 multicast
 * MAC (33:33:...); anything else multicast is MLX5E_MC_OTHER.
 */
140 mlx5e_get_eth_addr_type(const u8 * addr)
142 if (ETHER_IS_MULTICAST(addr) == 0)

145 if ((addr[0] == 0x01) &&
149 return (MLX5E_MC_IPV4);

151 if ((addr[0] == 0x33) &&
153 return (MLX5E_MC_IPV6);

155 return (MLX5E_MC_OTHER);
/*
 * Build the bitmask of traffic types (MLX5E_TT_*) for which flow rules
 * must be installed for address "ai", given the rule type
 * (MLX5E_FULLMATCH / MLX5E_ALLMULTI / MLX5E_PROMISC).  FULLMATCH narrows
 * the set by the address class: IPv4 multicast gets only IPv4-related
 * types, IPv6 multicast only IPv6-related ones, while unicast/broadcast
 * and PROMISC get the full TCP/UDP/IP/ANY set.
 */
159 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
165 case MLX5E_FULLMATCH:
166 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
167 switch (eth_addr_type) {
/* Unicast/broadcast: every traffic type. */
170 (1 << MLX5E_TT_IPV4_TCP) |
171 (1 << MLX5E_TT_IPV6_TCP) |
172 (1 << MLX5E_TT_IPV4_UDP) |
173 (1 << MLX5E_TT_IPV6_UDP) |
174 (1 << MLX5E_TT_IPV4) |
175 (1 << MLX5E_TT_IPV6) |
176 (1 << MLX5E_TT_ANY) |
/* IPv4 multicast: UDP over IPv4 plus raw IPv4. */
182 (1 << MLX5E_TT_IPV4_UDP) |
183 (1 << MLX5E_TT_IPV4) |
/* IPv6 multicast: UDP over IPv6 plus raw IPv6. */
189 (1 << MLX5E_TT_IPV6_UDP) |
190 (1 << MLX5E_TT_IPV6) |
/* Other multicast: only the catch-all type. */
196 (1 << MLX5E_TT_ANY) |
/* MLX5E_ALLMULTI: UDP/IP v4+v6 and catch-all. */
204 (1 << MLX5E_TT_IPV4_UDP) |
205 (1 << MLX5E_TT_IPV6_UDP) |
206 (1 << MLX5E_TT_IPV4) |
207 (1 << MLX5E_TT_IPV6) |
208 (1 << MLX5E_TT_ANY) |
212 default: /* MLX5E_PROMISC */
214 (1 << MLX5E_TT_IPV4_TCP) |
215 (1 << MLX5E_TT_IPV6_TCP) |
216 (1 << MLX5E_TT_IPV4_UDP) |
217 (1 << MLX5E_TT_IPV6_UDP) |
218 (1 << MLX5E_TT_IPV4) |
219 (1 << MLX5E_TT_IPV6) |
220 (1 << MLX5E_TT_ANY) |
/*
 * Install, in the main flow table, one forwarding rule per traffic type
 * selected by mlx5e_get_tt_vec().  "mc"/"mv" are caller-provided match
 * criteria/value buffers; the match is progressively tightened (DMAC ->
 * ethertype -> ip_protocol) as the rule groups get more specific, which
 * is why rules are added in ANY -> IPv4/6 -> UDP -> TCP -> AH -> ESP
 * order.  Each success records its bit in ai->tt_vec; the first failure
 * jumps to the error path which rolls back everything added so far.
 */
229 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
230 struct mlx5e_eth_addr_info *ai, int type,
233 struct mlx5_flow_destination dest = {};
235 struct mlx5_flow_rule **rule_p;
236 struct mlx5_flow_table *ft = priv->fts.main.t;
237 u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
238 outer_headers.dmac_47_16);
239 u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
240 outer_headers.dmac_47_16);
241 u32 *tirn = priv->tirn;

/* All rules forward to a TIR; tir_num is set per traffic type below. */
245 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
/* FULLMATCH: match the destination MAC exactly. */
248 case MLX5E_FULLMATCH:
249 mc_enable = MLX5_MATCH_OUTER_HEADERS;
250 memset(mc_dmac, 0xff, ETH_ALEN);
251 ether_addr_copy(mv_dmac, ai->addr);
/* ALLMULTI (case label not visible): presumably matches only the
 * multicast bit of the DMAC — confirm against full source. */
255 mc_enable = MLX5_MATCH_OUTER_HEADERS;

266 tt_vec = mlx5e_get_tt_vec(ai, type);

/* Catch-all rule: current match (MAC only, or none for PROMISC). */
268 if (tt_vec & BIT(MLX5E_TT_ANY)) {
269 rule_p = &ai->ft_rule[MLX5E_TT_ANY];
270 dest.tir_num = tirn[MLX5E_TT_ANY];
271 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
272 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
273 MLX5_FS_ETH_FLOW_TAG, &dest);
274 if (IS_ERR_OR_NULL(*rule_p))
276 ai->tt_vec |= BIT(MLX5E_TT_ANY);

/* From here on also match the ethertype. */
279 mc_enable = MLX5_MATCH_OUTER_HEADERS;
280 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

282 if (tt_vec & BIT(MLX5E_TT_IPV4)) {
283 rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
284 dest.tir_num = tirn[MLX5E_TT_IPV4];
285 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
287 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
288 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
289 MLX5_FS_ETH_FLOW_TAG, &dest);
290 if (IS_ERR_OR_NULL(*rule_p))
292 ai->tt_vec |= BIT(MLX5E_TT_IPV4);

295 if (tt_vec & BIT(MLX5E_TT_IPV6)) {
296 rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
297 dest.tir_num = tirn[MLX5E_TT_IPV6];
298 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
300 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
301 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
302 MLX5_FS_ETH_FLOW_TAG, &dest);
303 if (IS_ERR_OR_NULL(*rule_p))
305 ai->tt_vec |= BIT(MLX5E_TT_IPV6);

/* Also match the IP protocol; first UDP for both address families. */
308 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
309 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

311 if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
312 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
313 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
314 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
316 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
317 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
318 MLX5_FS_ETH_FLOW_TAG, &dest);
319 if (IS_ERR_OR_NULL(*rule_p))
321 ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);

324 if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
325 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
326 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
327 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
329 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
330 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
331 MLX5_FS_ETH_FLOW_TAG, &dest);
332 if (IS_ERR_OR_NULL(*rule_p))
334 ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);

/* Reuse the same criteria with protocol switched to TCP. */
337 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

339 if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
340 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
341 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
342 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
344 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
345 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
346 MLX5_FS_ETH_FLOW_TAG, &dest);
347 if (IS_ERR_OR_NULL(*rule_p))
349 ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);

352 if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
353 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
354 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
355 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
357 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
358 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
359 MLX5_FS_ETH_FLOW_TAG, &dest);
360 if (IS_ERR_OR_NULL(*rule_p))
363 ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);

/* IPsec AH rules. */
366 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

368 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
369 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
370 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
371 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
373 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
374 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
375 MLX5_FS_ETH_FLOW_TAG, &dest);
376 if (IS_ERR_OR_NULL(*rule_p))
378 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);

381 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
382 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
383 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
384 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
386 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
387 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
388 MLX5_FS_ETH_FLOW_TAG, &dest);
389 if (IS_ERR_OR_NULL(*rule_p))
391 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);

/* IPsec ESP rules. */
394 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

396 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
397 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
398 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
399 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
401 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
402 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
403 MLX5_FS_ETH_FLOW_TAG, &dest);
404 if (IS_ERR_OR_NULL(*rule_p))
406 ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);

409 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
410 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
411 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
412 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
414 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
415 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
416 MLX5_FS_ETH_FLOW_TAG, &dest);
417 if (IS_ERR_OR_NULL(*rule_p))
419 ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);

/* Error path: report the failed rule and roll back all installed ones. */
425 err = PTR_ERR(*rule_p);
427 mlx5e_del_eth_addr_from_flow_table(priv, ai);
/*
 * Wrapper around mlx5e_add_eth_addr_rule_sub(): allocates zeroed match
 * value/criteria scratch buffers, runs the rule installation, and frees
 * the buffers on all paths (goto cleanup).  Allocation failure is logged
 * and reported without touching the flow tables.
 */
433 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
434 struct mlx5e_eth_addr_info *ai, int type)
440 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
441 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
442 if (!match_value || !match_criteria) {
443 if_printf(priv->ifp, "%s: alloc failed\n", __func__);
445 goto add_eth_addr_rule_out;

447 err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,

450 add_eth_addr_rule_out:
451 kvfree(match_criteria);
/*
 * Push the set of active VLAN IDs into the NIC vport context.  Counts the
 * bits set in priv->vlan.active_vlans, truncates (with a warning) to the
 * device limit 2^log_max_vlan_list, copies the IDs into a temporary array
 * and hands it to mlx5_modify_nic_vport_vlans().
 */
457 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
459 struct ifnet *ifp = priv->ifp;

/* First pass: count active VLANs. */
468 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)

471 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

473 if (list_size > max_list_size) {
475 "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
476 list_size, max_list_size);
477 list_size = max_list_size;

480 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);

/* Second pass: collect the (possibly truncated) VLAN IDs. */
485 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {

491 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
493 if_printf(ifp, "Failed to modify vport vlans list err(%d)\n",
/*
 * Kinds of rules in the VLAN flow table: untagged traffic, any C-tagged
 * (802.1Q) VID, any S-tagged (802.1ad) VID, or an exact VID match.
 */
500 enum mlx5e_vlan_rule_type {
501 MLX5E_VLAN_RULE_TYPE_UNTAGGED,
502 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
503 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
504 MLX5E_VLAN_RULE_TYPE_MATCH_VID,
/*
 * Install one rule in the VLAN flow table that forwards matching traffic
 * to the main flow table.  The match criteria depend on rule_type:
 * untagged (cvlan_tag must be 0), any C-tag, any S-tag, or C-tag with an
 * exact VID ("vid").  The exact-VID case also refreshes the vport VLAN
 * list.  On failure the PTR_ERR is logged and returned.
 */
508 mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
509 enum mlx5e_vlan_rule_type rule_type, u16 vid,
512 struct mlx5_flow_table *ft = priv->fts.vlan.t;
513 struct mlx5_flow_destination dest = {};
515 struct mlx5_flow_rule **rule_p;

/* VLAN rules all forward to the main flow table for L3/L4 steering. */
518 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
519 dest.ft = priv->fts.main.t;

521 mc_enable = MLX5_MATCH_OUTER_HEADERS;

524 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
525 rule_p = &priv->vlan.untagged_ft_rule;
/* Criteria bit set, value left 0 => matches frames with no C-tag. */
526 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
528 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
529 rule_p = &priv->vlan.any_cvlan_ft_rule;
530 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
531 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
533 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
534 rule_p = &priv->vlan.any_svlan_ft_rule;
535 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
536 MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
538 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
539 rule_p = &priv->vlan.active_vlans_ft_rule[vid];
540 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
541 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
542 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
543 MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
/* Keep the device's vport VLAN list in sync with the new VID. */
544 mlx5e_vport_context_update_vlans(priv);

548 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
549 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
550 MLX5_FS_ETH_FLOW_TAG,

553 if (IS_ERR(*rule_p)) {
554 err = PTR_ERR(*rule_p);
556 if_printf(priv->ifp, "%s: add rule failed\n", __func__);
/*
 * Wrapper around mlx5e_add_vlan_rule_sub(): allocates zeroed match
 * value/criteria buffers, installs the VLAN rule, and frees the buffers
 * on all paths.  Mirrors mlx5e_add_eth_addr_rule().
 */
563 mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
564 enum mlx5e_vlan_rule_type rule_type, u16 vid)
570 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
571 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
572 if (!match_value || !match_criteria) {
573 if_printf(priv->ifp, "%s: alloc failed\n", __func__);
575 goto add_vlan_rule_out;

578 err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,

582 kvfree(match_criteria);
/*
 * Delete a previously installed VLAN rule of the given type (and VID for
 * exact matches).  Each stored rule pointer is NULLed after deletion so
 * repeated calls are safe.  Removing an exact-VID rule also refreshes the
 * vport VLAN list.
 */
589 mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
590 enum mlx5e_vlan_rule_type rule_type, u16 vid)
593 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
594 if (priv->vlan.untagged_ft_rule) {
595 mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
596 priv->vlan.untagged_ft_rule = NULL;
599 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
600 if (priv->vlan.any_cvlan_ft_rule) {
601 mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
602 priv->vlan.any_cvlan_ft_rule = NULL;
605 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
606 if (priv->vlan.any_svlan_ft_rule) {
607 mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
608 priv->vlan.any_svlan_ft_rule = NULL;
611 case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
612 if (priv->vlan.active_vlans_ft_rule[vid]) {
613 mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
614 priv->vlan.active_vlans_ft_rule[vid] = NULL;
616 mlx5e_vport_context_update_vlans(priv);
/* Remove both "any VID" catch-all rules (C-tag and S-tag). */
624 mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
626 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
627 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);

/*
 * Install both "any VID" catch-all rules; if the S-tag rule fails, the
 * already-installed C-tag rule is rolled back so the pair is atomic.
 */
631 mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
635 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);

639 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
641 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
/*
 * Re-enable VLAN filtering: when leaving the filter-disabled state (and
 * not in promiscuous mode), drop the "any VID" catch-all rules so only
 * registered VIDs pass — but only while the interface is opened.
 */
647 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
649 if (priv->vlan.filter_disabled) {
650 priv->vlan.filter_disabled = false;
651 if (priv->ifp->if_flags & IFF_PROMISC)
653 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
654 mlx5e_del_any_vid_rules(priv);

/*
 * Disable VLAN filtering: install the "any VID" catch-all rules so all
 * VLAN-tagged traffic is accepted (skipped when already promiscuous,
 * and only while the interface is opened).
 */
659 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
661 if (!priv->vlan.filter_disabled) {
662 priv->vlan.filter_disabled = true;
663 if (priv->ifp->if_flags & IFF_PROMISC)
665 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
666 mlx5e_add_any_vid_rules(priv);
/*
 * if_vlan registration callback: mark "vid" active and, if it was not
 * already set and the interface is opened, install its exact-match rule.
 * Events for foreign ifnets are ignored.
 */
671 mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
673 struct mlx5e_priv *priv = arg;

675 if (ifp != priv->ifp)

679 if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
680 test_bit(MLX5E_STATE_OPENED, &priv->state))
681 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

/*
 * if_vlan unregistration callback: clear "vid" from the active set and,
 * while opened, remove its exact-match rule.
 */
686 mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
688 struct mlx5e_priv *priv = arg;

690 if (ifp != priv->ifp)

694 clear_bit(vid, priv->vlan.active_vlans);
695 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
696 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/*
 * (Re)install every VLAN rule after open: one exact-match rule per active
 * VID (VID 0 is forced active first), the untagged rule, and — when
 * filtering is disabled — the "any VID" catch-alls.  On failure all rules
 * added so far are rolled back via mlx5e_del_all_vlan_rules().
 */
701 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
706 set_bit(0, priv->vlan.active_vlans);
707 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
708 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,

714 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

718 if (priv->vlan.filter_disabled) {
719 err = mlx5e_add_any_vid_rules(priv);

725 mlx5e_del_all_vlan_rules(priv);

/* Mirror of the above: remove catch-alls, untagged rule, and every
 * per-VID rule, then drop the synthetic VID 0. */
730 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
734 if (priv->vlan.filter_disabled)
735 mlx5e_del_any_vid_rules(priv);

737 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

739 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
740 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
741 clear_bit(0, priv->vlan.active_vlans);
/* Iterate (deletion-safe) over every node in every bucket of an address
 * hash table.  FOREACH_SAFE allows the body to remove "hn". */
744 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
745 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
746 LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

/*
 * Apply a node's queued action: ADD installs full-match flow rules and
 * resets the action; DEL removes the rules and unlinks the node from the
 * hash (which is why the iteration macro above must be deletion-safe).
 */
749 mlx5e_execute_action(struct mlx5e_priv *priv,
750 struct mlx5e_eth_addr_hash_node *hn)
752 switch (hn->action) {
753 case MLX5E_ACTION_ADD:
754 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
755 hn->action = MLX5E_ACTION_NONE;

758 case MLX5E_ACTION_DEL:
759 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
760 mlx5e_del_eth_addr_from_hash(hn);
/*
 * Snapshot the ifnet's current unicast and multicast addresses into the
 * driver's hash tables (queued as ADD actions).  The interface's own
 * link-level address is added explicitly, then every AF_LINK address and
 * every AF_LINK multicast membership is walked under the ifnet address
 * locks.
 */
769 mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
771 struct ifnet *ifp = priv->ifp;
773 struct ifmultiaddr *ifma;

775 /* XXX adding this entry might not be needed */
776 mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
777 LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

/* Unicast: all link-level addresses assigned to the interface. */
780 CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
781 if (ifa->ifa_addr->sa_family != AF_LINK)
783 mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
784 LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
786 if_addr_runlock(ifp);

/* Multicast: all link-level group memberships. */
789 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
790 if (ifma->ifma_addr->sa_family != AF_LINK)
792 mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
793 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
795 if_maddr_runlock(ifp);
/*
 * Flatten one address hash table (UC or MC, per list_type) into the
 * caller-provided addr_array of at most "size" entries.  For the UC list
 * the interface's own MAC is pushed first; for MC the broadcast address
 * is pushed first when broadcast is enabled.  The interface's own address
 * is skipped during the walk to avoid a duplicate.
 */
798 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
799 u8 addr_array[][ETH_ALEN], int size)
801 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
802 struct ifnet *ifp = priv->ifp;
803 struct mlx5e_eth_addr_hash_node *hn;
804 struct mlx5e_eth_addr_hash_head *addr_list;
805 struct mlx5e_eth_addr_hash_node *tmp;

809 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

811 if (is_uc) /* Make sure our own address is pushed first */
812 ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
813 else if (priv->eth_addr.broadcast_enabled)
814 ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

816 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
817 if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
821 ether_addr_copy(addr_array[i++], hn->ai.addr);
/*
 * Program the NIC vport context with the current UC or MC address list.
 * Computes the required size (plus one broadcast slot for MC when
 * enabled), clamps it (with a warning) to the device capability
 * 2^log_max_current_{uc,mc}_list, fills a temporary array via
 * mlx5e_fill_addr_array() and pushes it with
 * mlx5_modify_nic_vport_mac_list().
 */
825 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
828 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
829 struct mlx5e_eth_addr_hash_node *hn;
830 u8 (*addr_array)[ETH_ALEN] = NULL;
831 struct mlx5e_eth_addr_hash_head *addr_list;
832 struct mlx5e_eth_addr_hash_node *tmp;

/* Reserve the leading broadcast slot for the MC list only. */
838 size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
840 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
841 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

843 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
844 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)

847 if (size > max_size) {
849 "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
850 is_uc ? "UC" : "MC", size, max_size);

855 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);

860 mlx5e_fill_addr_array(priv, list_type, addr_array, size);

863 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
867 "Failed to modify vport %s list err(%d)\n",
868 is_uc ? "UC" : "MC", err);
/*
 * Sync the full vport context with driver state: UC list, MC list, and
 * the allmulti/promisc flags (MTU argument passed as 0 here).
 */
872 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
874 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

876 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
877 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
878 mlx5_modify_nic_vport_promisc(priv->mdev, 0,
879 ea->allmulti_enabled,
880 ea->promisc_enabled);
/*
 * Execute every queued add/del action in both the UC and MC address
 * hashes (deletion-safe iteration, since DEL unlinks nodes).
 */
884 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
886 struct mlx5e_eth_addr_hash_node *hn;
887 struct mlx5e_eth_addr_hash_node *tmp;

890 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
891 mlx5e_execute_action(priv, hn);

893 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
894 mlx5e_execute_action(priv, hn);

/*
 * Mark-and-sweep resync of hardware filters with the ifnet address set:
 * mark every hashed address for deletion, re-add the currently configured
 * addresses while the interface is opened (turning still-present ones
 * back to NONE), then apply — leaving only stale addresses deleted.
 */
898 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
900 struct mlx5e_eth_addr_hash_node *hn;
901 struct mlx5e_eth_addr_hash_node *tmp;

904 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
905 hn->action = MLX5E_ACTION_DEL;
906 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
907 hn->action = MLX5E_ACTION_DEL;

909 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
910 mlx5e_sync_ifp_addr(priv);

912 mlx5e_apply_ifp_addr(priv);
/*
 * Reconcile RX filtering state with the ifnet flags.  Computes the
 * desired promisc/allmulti/broadcast states (all forced off unless the
 * interface is opened), derives enable/disable edge transitions against
 * the cached state in priv->eth_addr, applies adds before the address
 * resync and removals after it, then records the new state and pushes it
 * into the vport context.
 */
916 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
918 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
919 struct ifnet *ndev = priv->ifp;

921 bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
922 bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
923 bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
924 bool broadcast_enabled = rx_mode_enable;

/* Edge detection: act only on state transitions, not steady state. */
926 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
927 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
928 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
929 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
930 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
931 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

933 /* update broadcast address */
934 ether_addr_copy(priv->eth_addr.broadcast.addr,
935 priv->ifp->if_broadcastaddr);

/* Promisc also needs the any-VID rules unless VLAN filtering is off
 * (in which case they are already installed). */
937 if (enable_promisc) {
938 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
939 if (!priv->vlan.filter_disabled)
940 mlx5e_add_any_vid_rules(priv);
943 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
944 if (enable_broadcast)
945 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

/* Resync per-address filters between the adds and the removals. */
947 mlx5e_handle_ifp_addr(priv);

949 if (disable_broadcast)
950 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
951 if (disable_allmulti)
952 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
953 if (disable_promisc) {
954 if (!priv->vlan.filter_disabled)
955 mlx5e_del_any_vid_rules(priv);
956 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);

959 ea->promisc_enabled = promisc_enabled;
960 ea->allmulti_enabled = allmulti_enabled;
961 ea->broadcast_enabled = broadcast_enabled;

963 mlx5e_vport_context_update(priv);
/*
 * Deferred-work entry point: run the RX-mode reconciliation only while
 * the interface is opened.
 */
967 mlx5e_set_rx_mode_work(struct work_struct *work)
969 struct mlx5e_priv *priv =
970 container_of(work, struct mlx5e_priv, set_rx_mode_work);

973 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
974 mlx5e_set_rx_mode_core(priv);

/*
 * Destroy all flow groups of a table, newest first; slots holding an
 * ERR_PTR or NULL (from a failed/partial creation) are skipped.
 */
979 mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
983 for (i = ft->num_groups - 1; i >= 0; i--) {
984 if (!IS_ERR_OR_NULL(ft->g[i]))
985 mlx5_destroy_flow_group(ft->g[i]);

/* Tear down a flow table: its groups first, then the table itself. */
992 mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
994 mlx5e_destroy_groups(ft);
996 mlx5_destroy_flow_table(ft->t);
1000 #define MLX5E_NUM_MAIN_GROUPS 10
1001 #define MLX5E_MAIN_GROUP0_SIZE BIT(4)
1002 #define MLX5E_MAIN_GROUP1_SIZE BIT(3)
1003 #define MLX5E_MAIN_GROUP2_SIZE BIT(1)
1004 #define MLX5E_MAIN_GROUP3_SIZE BIT(0)
1005 #define MLX5E_MAIN_GROUP4_SIZE BIT(14)
1006 #define MLX5E_MAIN_GROUP5_SIZE BIT(13)
1007 #define MLX5E_MAIN_GROUP6_SIZE BIT(11)
1008 #define MLX5E_MAIN_GROUP7_SIZE BIT(2)
1009 #define MLX5E_MAIN_GROUP8_SIZE BIT(1)
1010 #define MLX5E_MAIN_GROUP9_SIZE BIT(0)
1011 #define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\
1012 MLX5E_MAIN_GROUP1_SIZE +\
1013 MLX5E_MAIN_GROUP2_SIZE +\
1014 MLX5E_MAIN_GROUP3_SIZE +\
1015 MLX5E_MAIN_GROUP4_SIZE +\
1016 MLX5E_MAIN_GROUP5_SIZE +\
1017 MLX5E_MAIN_GROUP6_SIZE +\
1018 MLX5E_MAIN_GROUP7_SIZE +\
1019 MLX5E_MAIN_GROUP8_SIZE +\
1020 MLX5E_MAIN_GROUP9_SIZE +\
/*
 * Create the MLX5E_NUM_MAIN_GROUPS flow groups of the main table, from
 * most to least specific match criteria.  Groups must be created in
 * decreasing specificity because flow-group order determines match
 * precedence; "ix" tracks the running flow-index range assigned to each
 * group.  On any failure the ERR_PTR is converted to an errno, the slot
 * is cleared, and all groups created so far are destroyed.
 */
1024 mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1027 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1028 u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
1029 match_criteria.outer_headers.dmac_47_16);

1033 /* Tunnel rules need to be first in this list of groups */

1035 /* Start tunnel rules */
/* Group 0: ethertype + ip_protocol + UDP dport (tunnel detection). */
1036 memset(in, 0, inlen);
1037 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1038 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1039 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1040 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1041 MLX5_SET_CFG(in, start_flow_index, ix);
1042 ix += MLX5E_MAIN_GROUP0_SIZE;
1043 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1044 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1045 if (IS_ERR(ft->g[ft->num_groups]))
1046 goto err_destory_groups;

1048 /* End Tunnel Rules */

/* Group 1: ethertype + ip_protocol. */
1050 memset(in, 0, inlen);
1051 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1052 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1053 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1054 MLX5_SET_CFG(in, start_flow_index, ix);
1055 ix += MLX5E_MAIN_GROUP1_SIZE;
1056 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1057 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1058 if (IS_ERR(ft->g[ft->num_groups]))
1059 goto err_destory_groups;

/* Group 2: ethertype only. */
1062 memset(in, 0, inlen);
1063 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1064 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1065 MLX5_SET_CFG(in, start_flow_index, ix);
1066 ix += MLX5E_MAIN_GROUP2_SIZE;
1067 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1068 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1069 if (IS_ERR(ft->g[ft->num_groups]))
1070 goto err_destory_groups;

/* Group 3: no criteria (catch-all). */
1073 memset(in, 0, inlen);
1074 MLX5_SET_CFG(in, start_flow_index, ix);
1075 ix += MLX5E_MAIN_GROUP3_SIZE;
1076 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1077 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1078 if (IS_ERR(ft->g[ft->num_groups]))
1079 goto err_destory_groups;

/* Group 4: DMAC + ethertype + ip_protocol (per-address L4 rules). */
1082 memset(in, 0, inlen);
1083 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1084 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1085 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1086 memset(dmac, 0xff, ETH_ALEN);
1087 MLX5_SET_CFG(in, start_flow_index, ix);
1088 ix += MLX5E_MAIN_GROUP4_SIZE;
1089 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1090 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1091 if (IS_ERR(ft->g[ft->num_groups]))
1092 goto err_destory_groups;

/* Group 5: DMAC + ethertype. */
1095 memset(in, 0, inlen);
1096 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1097 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1098 memset(dmac, 0xff, ETH_ALEN);
1099 MLX5_SET_CFG(in, start_flow_index, ix);
1100 ix += MLX5E_MAIN_GROUP5_SIZE;
1101 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1102 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1103 if (IS_ERR(ft->g[ft->num_groups]))
1104 goto err_destory_groups;

/* Group 6: DMAC only. */
1107 memset(in, 0, inlen);
1108 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1109 memset(dmac, 0xff, ETH_ALEN);
1110 MLX5_SET_CFG(in, start_flow_index, ix);
1111 ix += MLX5E_MAIN_GROUP6_SIZE;
1112 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1113 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1114 if (IS_ERR(ft->g[ft->num_groups]))
1115 goto err_destory_groups;

/* Group 7: ethertype + ip_protocol with a partial DMAC criterion
 * (the DMAC byte set between these lines is not visible here). */
1118 memset(in, 0, inlen);
1119 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1120 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1121 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1123 MLX5_SET_CFG(in, start_flow_index, ix);
1124 ix += MLX5E_MAIN_GROUP7_SIZE;
1125 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1126 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1127 if (IS_ERR(ft->g[ft->num_groups]))
1128 goto err_destory_groups;

/* Group 8: ethertype with a partial DMAC criterion. */
1131 memset(in, 0, inlen);
1132 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1133 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1135 MLX5_SET_CFG(in, start_flow_index, ix);
1136 ix += MLX5E_MAIN_GROUP8_SIZE;
1137 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1138 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1139 if (IS_ERR(ft->g[ft->num_groups]))
1140 goto err_destory_groups;

/* Group 9: last group, partial DMAC criterion only. */
1143 memset(in, 0, inlen);
1144 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1146 MLX5_SET_CFG(in, start_flow_index, ix);
1147 ix += MLX5E_MAIN_GROUP9_SIZE;
1148 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1149 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1150 if (IS_ERR(ft->g[ft->num_groups]))
1151 goto err_destory_groups;

/* Error path: convert ERR_PTR, clear the failed slot, roll back. */
1157 err = PTR_ERR(ft->g[ft->num_groups]);
1158 ft->g[ft->num_groups] = NULL;
1159 mlx5e_destroy_groups(ft);
/*
 * Allocate the create_flow_group_in scratch buffer and build the main
 * table's groups via mlx5e_create_main_groups_sub().
 */
1165 mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
1168 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);

1171 in = mlx5_vzalloc(inlen);
1175 err = mlx5e_create_main_groups_sub(ft, in, inlen);

/*
 * Create the "main" flow table (sized MLX5E_MAIN_TABLE_SIZE), allocate
 * its group-pointer array, and populate the groups.  Failures unwind in
 * reverse creation order via the goto chain.
 */
1181 static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
1183 struct mlx5e_flow_table *ft = &priv->fts.main;

1187 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main",
1188 MLX5E_MAIN_TABLE_SIZE);
1190 if (IS_ERR(ft->t)) {
1191 err = PTR_ERR(ft->t);

1195 ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1198 goto err_destroy_main_flow_table;

1201 err = mlx5e_create_main_groups(ft);

1209 err_destroy_main_flow_table:
1210 mlx5_destroy_flow_table(ft->t);

/* Tear down the main flow table and its groups. */
1216 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
1218 mlx5e_destroy_flow_table(&priv->fts.main);
1221 #define MLX5E_NUM_VLAN_GROUPS 3
1222 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
1223 #define MLX5E_VLAN_GROUP1_SIZE BIT(1)
1224 #define MLX5E_VLAN_GROUP2_SIZE BIT(0)
1225 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
1226 MLX5E_VLAN_GROUP1_SIZE +\
1227 MLX5E_VLAN_GROUP2_SIZE +\
/*
 * Create the three VLAN-table flow groups, most specific first:
 * group 0 matches cvlan_tag + first_vid (per-VID rules), group 1
 * matches cvlan_tag only, group 2 matches svlan_tag only.  On failure
 * the ERR_PTR is converted, the slot cleared, and prior groups
 * destroyed — same pattern as mlx5e_create_main_groups_sub().
 */
1231 mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1236 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

1238 memset(in, 0, inlen);
1239 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1240 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1241 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1242 MLX5_SET_CFG(in, start_flow_index, ix);
1243 ix += MLX5E_VLAN_GROUP0_SIZE;
1244 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1245 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1246 if (IS_ERR(ft->g[ft->num_groups]))
1247 goto err_destory_groups;

1250 memset(in, 0, inlen);
1251 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1252 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1253 MLX5_SET_CFG(in, start_flow_index, ix);
1254 ix += MLX5E_VLAN_GROUP1_SIZE;
1255 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1256 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1257 if (IS_ERR(ft->g[ft->num_groups]))
1258 goto err_destory_groups;

1261 memset(in, 0, inlen);
1262 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1263 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1264 MLX5_SET_CFG(in, start_flow_index, ix);
1265 ix += MLX5E_VLAN_GROUP2_SIZE;
1266 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1267 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1268 if (IS_ERR(ft->g[ft->num_groups]))
1269 goto err_destory_groups;

/* Error path: convert ERR_PTR, clear the failed slot, roll back. */
1275 err = PTR_ERR(ft->g[ft->num_groups]);
1276 ft->g[ft->num_groups] = NULL;
1277 mlx5e_destroy_groups(ft);
/*
 * Allocate a zeroed create_flow_group_in command buffer and delegate to
 * mlx5e_create_vlan_groups_sub() to program the VLAN flow groups.
 * The buffer is sized from the firmware command layout (MLX5_ST_SZ_BYTES).
 */
1283 mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
1286 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1289 in = mlx5_vzalloc(inlen);
1293 err = mlx5e_create_vlan_groups_sub(ft, in, inlen);
/*
 * Create the per-port VLAN flow table: create the table object in the
 * kernel flow namespace, allocate the array of flow-group handles, then
 * create the groups.  Failure after table creation unwinds by destroying
 * the table (err_destroy_vlan_flow_table).
 */
1300 mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
1302 struct mlx5e_flow_table *ft = &priv->fts.vlan;
1306 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
1307 MLX5E_VLAN_TABLE_SIZE);
1309 if (IS_ERR(ft->t)) {
1310 err = PTR_ERR(ft->t);
/* Flow-group handle array; freed when the table is destroyed. */
1314 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1317 goto err_destroy_vlan_flow_table;
1320 err = mlx5e_create_vlan_groups(ft);
1329 err_destroy_vlan_flow_table:
1330 mlx5_destroy_flow_table(ft->t);
/* Tear down the VLAN flow table created by mlx5e_create_vlan_flow_table(). */
1337 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
1339 mlx5e_destroy_flow_table(&priv->fts.vlan);
/*
 * Inner-RSS flow-table layout (RSS on inner headers of tunneled
 * traffic).  Three flow groups, allocated in order:
 *   group 0: BIT(3) entries matching inner ethertype + inner ip_protocol
 *   group 1: BIT(1) entries matching inner ethertype only
 *   group 2: BIT(0) catch-all entry (no match criteria)
 * MLX5E_INNER_RSS_TABLE_SIZE sums the group sizes (the size expression
 * continues beyond this excerpt).
 */
1342 #define MLX5E_NUM_INNER_RSS_GROUPS 3
1343 #define MLX5E_INNER_RSS_GROUP0_SIZE BIT(3)
1344 #define MLX5E_INNER_RSS_GROUP1_SIZE BIT(1)
1345 #define MLX5E_INNER_RSS_GROUP2_SIZE BIT(0)
1346 #define MLX5E_INNER_RSS_TABLE_SIZE (MLX5E_INNER_RSS_GROUP0_SIZE +\
1347 MLX5E_INNER_RSS_GROUP1_SIZE +\
1348 MLX5E_INNER_RSS_GROUP2_SIZE +\
/*
 * Create the three flow groups of the inner-RSS flow table, carving each
 * group's flow-index range sequentially via 'ix', mirroring
 * mlx5e_create_vlan_groups_sub() but matching on INNER headers.
 * 'in' is a caller-supplied create_flow_group_in buffer of 'inlen'
 * bytes, zeroed before each group.  On failure, already-created groups
 * are destroyed under err_destory_groups (NOTE(review): pre-existing
 * "destory" spelling of the label).
 */
1352 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1355 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
/* Group 0: match inner ethertype + inner IP protocol. */
1359 memset(in, 0, inlen);
1360 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1361 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1362 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1363 MLX5_SET_CFG(in, start_flow_index, ix);
1364 ix += MLX5E_INNER_RSS_GROUP0_SIZE;
1365 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1366 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1367 if (IS_ERR(ft->g[ft->num_groups]))
1368 goto err_destory_groups;
/* Group 1: match inner ethertype only. */
1371 memset(in, 0, inlen);
1372 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1373 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1374 MLX5_SET_CFG(in, start_flow_index, ix);
1375 ix += MLX5E_INNER_RSS_GROUP1_SIZE;
1376 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1377 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1378 if (IS_ERR(ft->g[ft->num_groups]))
1379 goto err_destory_groups;
/* Group 2: no match criteria set — catch-all entry. */
1382 memset(in, 0, inlen);
1383 MLX5_SET_CFG(in, start_flow_index, ix);
1384 ix += MLX5E_INNER_RSS_GROUP2_SIZE;
1385 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1386 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1387 if (IS_ERR(ft->g[ft->num_groups]))
1388 goto err_destory_groups;
/*
 * Unwind path: convert the failed handle to an errno, clear the slot so
 * only valid handles are destroyed, then tear down created groups.
 */
1394 err = PTR_ERR(ft->g[ft->num_groups]);
1395 ft->g[ft->num_groups] = NULL;
1396 mlx5e_destroy_groups(ft);
/*
 * Allocate a zeroed create_flow_group_in command buffer and delegate to
 * mlx5e_create_inner_rss_groups_sub() to program the inner-RSS groups.
 */
1402 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
1405 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1408 in = mlx5_vzalloc(inlen);
1412 err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);
/*
 * Create the inner-RSS flow table: create the table object in the kernel
 * flow namespace, allocate the flow-group handle array, then create the
 * groups.  Failure after table creation unwinds by destroying the table
 * (err_destroy_inner_rss_flow_table).  Mirrors
 * mlx5e_create_vlan_flow_table().
 */
1419 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
1421 struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
1425 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
1426 MLX5E_INNER_RSS_TABLE_SIZE);
1428 if (IS_ERR(ft->t)) {
1429 err = PTR_ERR(ft->t);
/* Flow-group handle array; freed when the table is destroyed. */
1433 ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
1437 goto err_destroy_inner_rss_flow_table;
1440 err = mlx5e_create_inner_rss_groups(ft);
1449 err_destroy_inner_rss_flow_table:
1450 mlx5_destroy_flow_table(ft->t);
/* Tear down the table created by mlx5e_create_inner_rss_flow_table(). */
1456 static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
1458 mlx5e_destroy_flow_table(&priv->fts.inner_rss);
/*
 * Bring up all flow-steering tables for the port: look up the kernel
 * flow namespace, then create the VLAN, main, and inner-RSS tables in
 * that order.  Uses goto-based unwinding so a failure destroys only the
 * tables already created, in reverse creation order.
 */
1462 mlx5e_open_flow_table(struct mlx5e_priv *priv)
1466 priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
1467 MLX5_FLOW_NAMESPACE_KERNEL);
1469 err = mlx5e_create_vlan_flow_table(priv);
1473 err = mlx5e_create_main_flow_table(priv);
1475 goto err_destroy_vlan_flow_table;
1477 err = mlx5e_create_inner_rss_flow_table(priv);
1479 goto err_destroy_main_flow_table;
/* Unwind in reverse creation order. */
1483 err_destroy_main_flow_table:
1484 mlx5e_destroy_main_flow_table(priv);
1485 err_destroy_vlan_flow_table:
1486 mlx5e_destroy_vlan_flow_table(priv);
/*
 * Tear down all flow tables created by mlx5e_open_flow_table(), in
 * reverse creation order (inner-RSS, main, VLAN).
 */
1492 mlx5e_close_flow_table(struct mlx5e_priv *priv)
1494 mlx5e_destroy_inner_rss_flow_table(priv);
1495 mlx5e_destroy_main_flow_table(priv);
1496 mlx5e_destroy_vlan_flow_table(priv);