2 * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include "opt_ratelimit.h"
29 #include <dev/mlx5/mlx5_en/en.h>
31 #include <linux/list.h>
32 #include <dev/mlx5/fs.h>
33 #include <dev/mlx5/mpfs.h>
34 #include <dev/mlx5/mlx5_core/fs_tcp.h>
37 * The flow tables with rules define the packet processing on receive.
38 * Currently the following structure is set up to handle different
39 * offloads like TLS RX offload, VLAN decapsulation, packet
40 * classification, RSS hashing, VxLAN checksum offloading:
42 * +=========+ +=========+ +=================+
43 * |TCP/IPv4 | |TCP/IPv4 | |TCP/IPv4 Match |
44 * |Flowtable|------>| |----->|Outer Proto Match|=====> TLS TIR n
45 * | | |Catch-all|\ | |
46 * +=========+ +=========+| +=================+
48 * +------------------------+
50 * +=========+ +=========+ +=================+
51 * |TCP/IPv6 | |TCP/IPv6 | |TCP/IPv6 Match |
52 * |Flowtable|------>| |----->|Outer Proto Match|=====> TLS TIR n
53 * | | |Catch-all|\ | |
54 * +=========+ +=========+| +=================+
56 * +------------------------+
58 * +=========+ +=========+ +=================+
59 * |VLAN ft: | |VxLAN | |VxLAN Main |
60 * |CTAG/STAG|------>| VNI|----->|Inner Proto Match|=====> Inner TIR n
61 * |VID/noVID|/ |Catch-all|\ | |
62 * +=========+ +=========+| +=================+
69 * |Outer Proto Match|=====> TIR n
73 * The path through flow rules directs each packet into an appropriate TIR,
75 * - VLAN encapsulation
77 * - Presence of inner protocol
/* Shorthand for setting a field of a create_flow_group_in structure. */
80 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
/* No pending action for this hash node (see MLX5E_ACTION_ADD/DEL elsewhere). */
96 MLX5E_ACTION_NONE = 0,
/*
 * One L2 address tracked by the driver: hash-list linkage plus the
 * flow-rule state (mlx5e_eth_addr_info) associated with the address.
 */
101 struct mlx5e_eth_addr_hash_node {
102 LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
105 struct mlx5e_eth_addr_info ai;
/* Forward declaration; defined later in this file. */
108 static void mlx5e_del_all_vlan_rules(struct mlx5e_priv *);
/*
 * Hash an Ethernet address into a bucket index for the if_uc/if_mc
 * hash tables.  (Body not visible in this view.)
 */
111 mlx5e_hash_eth_addr(const u8 * addr)
/*
 * Insert "hn_new" into the address hash unless an entry with the same
 * MAC already exists.  On a duplicate, a pending DEL action is cancelled
 * (the address is still in use) and the new node is freed.
 */
117 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
118 struct mlx5e_eth_addr_hash_node *hn_new)
120 struct mlx5e_eth_addr_hash_node *hn;
121 u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);
123 LIST_FOREACH(hn, &hash[ix], hlist) {
124 if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
125 if (hn->action == MLX5E_ACTION_DEL)
126 hn->action = MLX5E_ACTION_NONE;
/* Duplicate address: drop the freshly allocated node. */
127 free(hn_new, M_MLX5EN)
131 LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
/* Unlink an address node from its hash list (caller frees if needed). */
136 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
138 LIST_REMOVE(hn, hlist);
/*
 * Tear down every per-traffic-type flow rule installed for this L2
 * address, in reverse order of traffic-type specificity.
 */
143 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
144 struct mlx5e_eth_addr_info *ai)
146 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
147 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
148 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
149 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
150 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
151 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
152 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
153 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
154 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6]);
155 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4]);
156 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_ANY]);
/*
 * Classify an Ethernet address: unicast, IPv4 multicast (01:...),
 * IPv6 multicast (33:...), or other multicast.
 */
160 mlx5e_get_eth_addr_type(const u8 * addr)
162 if (ETHER_IS_MULTICAST(addr) == 0)
/* 01:00:5e:... prefix => IPv4 multicast (remaining octets not visible here). */
165 if ((addr[0] == 0x01) &&
169 return (MLX5E_MC_IPV4);
/* 33:33:... prefix => IPv6 multicast. */
171 if ((addr[0] == 0x33) &&
173 return (MLX5E_MC_IPV6);
175 return (MLX5E_MC_OTHER);
/*
 * Compute the bit vector of traffic types (MLX5E_TT_*) for which flow
 * rules must be installed for address "ai", based on the rule type
 * (FULLMATCH/ALLMULTI/PROMISC) and the address class.
 */
179 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
185 case MLX5E_FULLMATCH:
186 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
187 switch (eth_addr_type) {
/* Unicast: all traffic types. */
190 (1 << MLX5E_TT_IPV4_TCP) |
191 (1 << MLX5E_TT_IPV6_TCP) |
192 (1 << MLX5E_TT_IPV4_UDP) |
193 (1 << MLX5E_TT_IPV6_UDP) |
194 (1 << MLX5E_TT_IPV4) |
195 (1 << MLX5E_TT_IPV6) |
196 (1 << MLX5E_TT_ANY) |
/* IPv4 multicast: UDP and raw IPv4 only. */
202 (1 << MLX5E_TT_IPV4_UDP) |
203 (1 << MLX5E_TT_IPV4) |
/* IPv6 multicast: UDP and raw IPv6 only. */
209 (1 << MLX5E_TT_IPV6_UDP) |
210 (1 << MLX5E_TT_IPV6) |
/* Other multicast: catch-all only. */
216 (1 << MLX5E_TT_ANY) |
/* ALLMULTI: UDP/IP and catch-all (per visible bits). */
224 (1 << MLX5E_TT_IPV4_UDP) |
225 (1 << MLX5E_TT_IPV6_UDP) |
226 (1 << MLX5E_TT_IPV4) |
227 (1 << MLX5E_TT_IPV6) |
228 (1 << MLX5E_TT_ANY) |
232 default: /* MLX5E_PROMISC */
234 (1 << MLX5E_TT_IPV4_TCP) |
235 (1 << MLX5E_TT_IPV6_TCP) |
236 (1 << MLX5E_TT_IPV4_UDP) |
237 (1 << MLX5E_TT_IPV6_UDP) |
238 (1 << MLX5E_TT_IPV4) |
239 (1 << MLX5E_TT_IPV6) |
240 (1 << MLX5E_TT_ANY) |
/*
 * Install the flow rules for one L2 address into the main flow table,
 * steering each enabled traffic type (from mlx5e_get_tt_vec()) to its
 * TIR.  "mc"/"mv" are caller-provided match-criteria / match-value
 * buffers that are progressively refined: first DMAC (for FULLMATCH),
 * then ethertype, then IP protocol.  On any rule failure, all rules
 * installed so far for this address are rolled back.
 */
249 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
250 struct mlx5e_eth_addr_info *ai, int type,
253 struct mlx5_flow_destination dest = {};
255 struct mlx5_flow_rule **rule_p;
256 struct mlx5_flow_table *ft = priv->fts.main.t;
257 u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
258 outer_headers.dmac_47_16);
259 u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
260 outer_headers.dmac_47_16);
261 u32 *tirn = priv->tirn;
264 struct mlx5_flow_act flow_act = {
265 .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
266 .flow_tag = MLX5_FS_ETH_FLOW_TAG,
269 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
272 case MLX5E_FULLMATCH:
/* Exact-match on the destination MAC address. */
273 mc_enable = MLX5_MATCH_OUTER_HEADERS;
274 memset(mc_dmac, 0xff, ETH_ALEN);
275 ether_addr_copy(mv_dmac, ai->addr);
/* Other types (visible fragment): outer-header match without full DMAC. */
279 mc_enable = MLX5_MATCH_OUTER_HEADERS;
290 tt_vec = mlx5e_get_tt_vec(ai, type);
/* Catch-all rule first (no ethertype/protocol criteria yet). */
292 if (tt_vec & BIT(MLX5E_TT_ANY)) {
293 rule_p = &ai->ft_rule[MLX5E_TT_ANY];
294 dest.tir_num = tirn[MLX5E_TT_ANY];
295 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
296 MLX5_FLOW_RULE_FWD_ACTION_DEST,
298 if (IS_ERR_OR_NULL(*rule_p))
/* From here on, also match the outer ethertype. */
302 mc_enable = MLX5_MATCH_OUTER_HEADERS;
303 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
305 if (tt_vec & BIT(MLX5E_TT_IPV4)) {
306 rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
307 dest.tir_num = tirn[MLX5E_TT_IPV4];
308 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
310 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
311 MLX5_FLOW_RULE_FWD_ACTION_DEST,
313 if (IS_ERR_OR_NULL(*rule_p))
317 if (tt_vec & BIT(MLX5E_TT_IPV6)) {
318 rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
319 dest.tir_num = tirn[MLX5E_TT_IPV6];
320 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
322 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
323 MLX5_FLOW_RULE_FWD_ACTION_DEST,
325 if (IS_ERR_OR_NULL(*rule_p))
/* Narrow further to IP protocol: first UDP... */
329 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
330 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
332 if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
333 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
334 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
335 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
337 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
338 MLX5_FLOW_RULE_FWD_ACTION_DEST,
340 if (IS_ERR_OR_NULL(*rule_p))
344 if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
345 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
346 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
347 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
349 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
350 MLX5_FLOW_RULE_FWD_ACTION_DEST,
352 if (IS_ERR_OR_NULL(*rule_p))
/* ...then TCP... */
356 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
358 if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
359 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
360 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
361 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
363 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
364 MLX5_FLOW_RULE_FWD_ACTION_DEST,
366 if (IS_ERR_OR_NULL(*rule_p))
370 if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
371 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
372 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
373 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
375 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
376 MLX5_FLOW_RULE_FWD_ACTION_DEST,
378 if (IS_ERR_OR_NULL(*rule_p))
/* ...then IPsec AH... */
382 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
384 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
385 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
386 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
387 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
389 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
390 MLX5_FLOW_RULE_FWD_ACTION_DEST,
392 if (IS_ERR_OR_NULL(*rule_p))
396 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
397 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
398 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
399 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
401 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
402 MLX5_FLOW_RULE_FWD_ACTION_DEST,
404 if (IS_ERR_OR_NULL(*rule_p))
/* ...and finally IPsec ESP. */
408 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
410 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
411 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
412 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
413 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
415 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
416 MLX5_FLOW_RULE_FWD_ACTION_DEST,
418 if (IS_ERR_OR_NULL(*rule_p))
422 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
423 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
424 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
425 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
427 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
428 MLX5_FLOW_RULE_FWD_ACTION_DEST,
430 if (IS_ERR_OR_NULL(*rule_p))
/* Error path: roll back every rule installed above for this address. */
437 err = PTR_ERR(*rule_p);
439 mlx5e_del_eth_addr_from_flow_table(priv, ai);
/*
 * Wrapper around mlx5e_add_eth_addr_rule_sub(): allocates the
 * match-criteria and match-value scratch buffers, runs the rule
 * installation, then frees the buffers.
 */
445 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
446 struct mlx5e_eth_addr_info *ai, int type)
452 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
453 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
454 if (!match_value || !match_criteria) {
455 mlx5_en_err(priv->ifp, "alloc failed\n");
457 goto add_eth_addr_rule_out;
459 err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
/* kvfree(NULL) is safe, so the error path can share this cleanup. */
462 add_eth_addr_rule_out:
463 kvfree(match_criteria);
/* Remove all per-traffic-type rules from the inner (VxLAN) main table. */
470 mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
472 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
473 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
474 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
475 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
476 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
477 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
478 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
479 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
480 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
481 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
482 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
/*
 * Install the inner-header classification rules for VxLAN-decapsulated
 * traffic into the main_vxlan table, steering each traffic type to its
 * inner-TIR.  Mirrors mlx5e_add_eth_addr_rule_sub() but matches on
 * inner_headers and is unconditional (no tt_vec).  The final catch-all
 * rule clears mc/mv so it matches everything.  On failure, all rules
 * installed so far are rolled back.
 */
486 mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
488 struct mlx5_flow_destination dest = {};
490 struct mlx5_flow_rule **rule_p;
491 struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
492 u32 *tirn = priv->tirn_inner_vxlan;
493 struct mlx5_flow_act flow_act = {
494 .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
495 .flow_tag = MLX5_FS_ETH_FLOW_TAG,
499 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
/* Match on the inner (post-decap) ethertype. */
501 mc_enable = MLX5_MATCH_INNER_HEADERS;
502 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
504 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
505 dest.tir_num = tirn[MLX5E_TT_IPV4];
506 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
507 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
508 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
509 if (IS_ERR_OR_NULL(*rule_p))
512 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
513 dest.tir_num = tirn[MLX5E_TT_IPV6];
514 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
515 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
516 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
517 if (IS_ERR_OR_NULL(*rule_p))
/* Narrow to inner IP protocol: UDP, then TCP, AH and ESP below. */
520 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
521 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_UDP);
523 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
524 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
525 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
526 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
527 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
528 if (IS_ERR_OR_NULL(*rule_p))
531 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
532 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
533 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
534 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
535 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
536 if (IS_ERR_OR_NULL(*rule_p))
539 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_TCP);
541 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
542 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
543 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
544 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
545 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
546 if (IS_ERR_OR_NULL(*rule_p))
549 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
550 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
551 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
552 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
553 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
554 if (IS_ERR_OR_NULL(*rule_p))
557 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_AH);
559 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
560 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
561 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
562 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
563 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
564 if (IS_ERR_OR_NULL(*rule_p))
567 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
568 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
569 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
570 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
571 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
572 if (IS_ERR_OR_NULL(*rule_p))
575 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_ESP);
577 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
578 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
579 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
580 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
581 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
582 if (IS_ERR_OR_NULL(*rule_p))
585 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP];
586 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
587 MLX5_SET(fte_match_param, mv, inner_headers.ethertype,
589 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
590 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
591 if (IS_ERR_OR_NULL(*rule_p))
/* Catch-all: clear criteria and value so everything else matches. */
595 memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
596 memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
597 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
598 dest.tir_num = tirn[MLX5E_TT_ANY];
599 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
600 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
601 if (IS_ERR_OR_NULL(*rule_p))
/* Error path: undo everything installed above. */
607 err = PTR_ERR(*rule_p);
609 mlx5e_del_main_vxlan_rules(priv);
/*
 * Allocate match buffers and install the inner-VxLAN classification
 * rules via mlx5e_add_main_vxlan_rules_sub(); free the buffers after.
 */
615 mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
621 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
622 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
623 if (match_value == NULL || match_criteria == NULL) {
624 mlx5_en_err(priv->ifp, "alloc failed\n");
626 goto add_main_vxlan_rules_out;
628 err = mlx5e_add_main_vxlan_rules_sub(priv, match_criteria, match_value);
630 add_main_vxlan_rules_out:
631 kvfree(match_criteria);
/*
 * Push the set of active VLAN IDs into the NIC vport context, clamping
 * the list to the device's log_max_vlan_list capability (excess VLANs
 * are dropped with a warning).
 */
637 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
639 if_t ifp = priv->ifp;
/* Count active VLANs (counting statement itself not visible here). */
648 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
651 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
653 if (list_size > max_list_size) {
655 "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
656 list_size, max_list_size);
657 list_size = max_list_size;
660 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
665 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
671 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
673 mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
/* Kinds of rules installed in the VLAN flow table. */
680 enum mlx5e_vlan_rule_type {
681 MLX5E_VLAN_RULE_TYPE_UNTAGGED,
682 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
683 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
684 MLX5E_VLAN_RULE_TYPE_MATCH_VID,
/*
 * Install one rule into the VLAN flow table, forwarding matching
 * packets to the VxLAN table (priv->fts.vxlan.t).  The match criteria
 * depend on rule_type: untagged, any C-tag, any S-tag, or an exact
 * C-tag VID match.  For MATCH_VID the vport VLAN list is refreshed too.
 */
688 mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
689 enum mlx5e_vlan_rule_type rule_type, u16 vid,
692 struct mlx5_flow_table *ft = priv->fts.vlan.t;
693 struct mlx5_flow_destination dest = {};
695 struct mlx5_flow_rule **rule_p;
697 struct mlx5_flow_act flow_act = {
698 .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
699 .flow_tag = MLX5_FS_ETH_FLOW_TAG,
/* All VLAN rules forward to the next table in the chain (VxLAN). */
702 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
703 dest.ft = priv->fts.vxlan.t;
705 mc_enable = MLX5_MATCH_OUTER_HEADERS;
708 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
/* Criteria on cvlan_tag with value 0 => untagged frames. */
709 rule_p = &priv->vlan.untagged_ft_rule;
710 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
712 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
713 rule_p = &priv->vlan.any_cvlan_ft_rule;
714 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
715 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
717 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
718 rule_p = &priv->vlan.any_svlan_ft_rule;
719 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
720 MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
722 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
723 rule_p = &priv->vlan.active_vlans_ft_rule[vid];
724 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
725 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
726 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
727 MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
728 mlx5e_vport_context_update_vlans(priv);
732 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
733 MLX5_FLOW_RULE_FWD_ACTION_DEST,
737 if (IS_ERR(*rule_p)) {
738 err = PTR_ERR(*rule_p);
740 mlx5_en_err(priv->ifp, "add rule failed\n");
/*
 * Allocate match buffers, install one VLAN rule via
 * mlx5e_add_vlan_rule_sub(), and free the buffers.
 */
747 mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
748 enum mlx5e_vlan_rule_type rule_type, u16 vid)
754 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
755 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
756 if (!match_value || !match_criteria) {
757 mlx5_en_err(priv->ifp, "alloc failed\n");
759 goto add_vlan_rule_out;
762 err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
766 kvfree(match_criteria);
/*
 * Remove the VLAN-table rule corresponding to rule_type (and, for
 * MATCH_VID, the given vid); refresh the vport VLAN list on VID removal.
 */
773 mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
774 enum mlx5e_vlan_rule_type rule_type, u16 vid)
777 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
778 mlx5_del_flow_rule(&priv->vlan.untagged_ft_rule);
780 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
781 mlx5_del_flow_rule(&priv->vlan.any_cvlan_ft_rule);
783 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
784 mlx5_del_flow_rule(&priv->vlan.any_svlan_ft_rule);
786 case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
787 mlx5_del_flow_rule(&priv->vlan.active_vlans_ft_rule[vid]);
788 mlx5e_vport_context_update_vlans(priv);
/* Remove both "any VID" rules (C-tag and S-tag). */
796 mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
798 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
799 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
/*
 * Install both "any VID" rules; if the S-tag rule fails, the already
 * installed C-tag rule is rolled back.
 */
803 mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
807 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
811 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
813 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
/*
 * Re-enable VLAN filtering: drop the "accept any VID" rules unless the
 * interface is in promiscuous mode (visible logic checks IFF_PROMISC
 * first; the action taken in that branch is not visible here).
 */
819 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
821 if (priv->vlan.filter_disabled) {
822 priv->vlan.filter_disabled = false;
823 if (if_getflags(priv->ifp) & IFF_PROMISC)
825 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
826 mlx5e_del_any_vid_rules(priv);
/*
 * Disable VLAN filtering: add "accept any VID" rules when the flow
 * rules are active (promiscuous interfaces are special-cased; the body
 * of that branch is not visible here).
 */
831 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
833 if (!priv->vlan.filter_disabled) {
834 priv->vlan.filter_disabled = true;
835 if (if_getflags(priv->ifp) & IFF_PROMISC)
837 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
838 mlx5e_add_any_vid_rules(priv);
/*
 * Ifnet VLAN-registration callback: record the VID as active and, if it
 * was not already set and flow rules are ready, install a MATCH_VID rule.
 */
843 mlx5e_vlan_rx_add_vid(void *arg, if_t ifp, u16 vid)
845 struct mlx5e_priv *priv = arg;
/* Ignore events for other interfaces. */
847 if (ifp != priv->ifp)
851 if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
852 test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
853 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/*
 * Ifnet VLAN-unregistration callback: clear the VID and remove its
 * MATCH_VID rule if flow rules are active.
 */
858 mlx5e_vlan_rx_kill_vid(void *arg, if_t ifp, u16 vid)
860 struct mlx5e_priv *priv = arg;
862 if (ifp != priv->ifp)
866 clear_bit(vid, priv->vlan.active_vlans);
867 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
868 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/*
 * Install the full VLAN rule set: a MATCH_VID rule per active VLAN
 * (VID 0 is forced on), the untagged rule, and - when filtering is
 * disabled - the any-VID rules.  On failure everything is rolled back
 * via mlx5e_del_all_vlan_rules().
 */
873 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
/* VID 0 (priority-tagged frames) is always treated as active. */
878 set_bit(0, priv->vlan.active_vlans);
879 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
880 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
886 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
890 if (priv->vlan.filter_disabled) {
891 err = mlx5e_add_any_vid_rules(priv);
897 mlx5e_del_all_vlan_rules(priv);
/*
 * Tear down the full VLAN rule set: any-VID rules (when filtering is
 * disabled), the untagged rule, and every per-VID rule; finally clear
 * the implicit VID 0 bit.
 */
902 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
906 if (priv->vlan.filter_disabled)
907 mlx5e_del_any_vid_rules(priv);
909 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
911 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
912 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
913 clear_bit(0, priv->vlan.active_vlans);
/*
 * Iterate over every node of an address hash table; safe against
 * removal of the current node (uses LIST_FOREACH_SAFE).
 */
916 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
917 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
918 LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
/*
 * Apply a node's pending action: ADD installs flow rules for the
 * address; DEL removes the rules, releases the MPFS entry (if one was
 * allocated) and unlinks the node from the hash.
 */
921 mlx5e_execute_action(struct mlx5e_priv *priv,
922 struct mlx5e_eth_addr_hash_node *hn)
924 switch (hn->action) {
925 case MLX5E_ACTION_ADD:
926 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
927 hn->action = MLX5E_ACTION_NONE;
930 case MLX5E_ACTION_DEL:
931 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
/* -1U marks "no MPFS entry allocated" (see mlx5e_sync_ifp_addr()). */
932 if (hn->mpfs_index != -1U)
933 mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
934 mlx5e_del_eth_addr_from_hash(hn);
/*
 * Move one node from the "fh" list to the "uh" list and return it
 * (the empty-list handling is not visible in this view).
 */
942 static struct mlx5e_eth_addr_hash_node *
943 mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
945 struct mlx5e_eth_addr_hash_node *hn;
949 LIST_REMOVE(hn, hlist);
950 LIST_INSERT_HEAD(uh, hn, hlist);
/*
 * Pop one node off list "fh" and return it (empty-list handling is not
 * visible in this view; callers test the return value against NULL).
 */
955 static struct mlx5e_eth_addr_hash_node *
956 mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
958 struct mlx5e_eth_addr_hash_node *hn;
962 LIST_REMOVE(hn, hlist);
/*
 * Context passed to the if_foreach_lladdr/llmaddr callback: a free-node
 * pool and the list being filled with copied addresses.
 */
966 struct mlx5e_copy_addr_ctx {
967 struct mlx5e_eth_addr_hash_head *free;
968 struct mlx5e_eth_addr_hash_head *fill;
/*
 * Per-address callback for if_foreach_lladdr()/if_foreach_llmaddr():
 * take a placeholder node from the free pool, copy the link-layer
 * address into it; flag failure if the pool is exhausted.
 */
973 mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
975 struct mlx5e_copy_addr_ctx *ctx = arg;
976 struct mlx5e_eth_addr_hash_node *hn;
978 hn = mlx5e_move_hn(ctx->free, ctx->fill);
/* Pool exhausted: the address list grew since we counted. */
980 ctx->success = false;
983 ether_addr_copy(hn->ai.addr, LLADDR(sdl));
/*
 * Synchronize the driver's L2 address hash tables with the ifnet's
 * current unicast and multicast address lists.  Pre-allocates
 * placeholder nodes (one per known address plus the primary MAC),
 * copies the addresses in under the callback, then merges them into
 * the if_uc/if_mc hashes; unicast entries also get an MPFS slot.
 * Leftover and duplicate nodes are freed at the end.
 */
989 mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
991 struct mlx5e_copy_addr_ctx ctx;
992 struct mlx5e_eth_addr_hash_head head_free;
993 struct mlx5e_eth_addr_hash_head head_uc;
994 struct mlx5e_eth_addr_hash_head head_mc;
995 struct mlx5e_eth_addr_hash_node *hn;
996 if_t ifp = priv->ifp;
1000 PRIV_ASSERT_LOCKED(priv);
1003 LIST_INIT(&head_free);
1004 LIST_INIT(&head_uc);
1005 LIST_INIT(&head_mc);
/* One extra node for the interface's primary MAC address. */
1006 num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);
1008 /* allocate place holders */
1009 for (x = 0; x != num; x++) {
1010 hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
1011 hn->action = MLX5E_ACTION_ADD;
1012 hn->mpfs_index = -1U;
1013 LIST_INSERT_HEAD(&head_free, hn, hlist);
/* Primary MAC goes first on the unicast list. */
1016 hn = mlx5e_move_hn(&head_free, &head_uc);
1019 ether_addr_copy(hn->ai.addr, if_getlladdr(ifp));
1021 ctx.free = &head_free;
1022 ctx.fill = &head_uc;
1024 if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
1025 if (ctx.success == false)
1028 ctx.fill = &head_mc;
1029 if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
1030 if (ctx.success == false)
1033 /* insert L2 unicast addresses into hash list */
1035 while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
1036 if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
/* New unicast entry: allocate an MPFS forwarding slot for it. */
1038 if (hn->mpfs_index == -1U)
1039 mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
1043 /* insert L2 multicast addresses into hash list */
1045 while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
1046 if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
/* Cleanup: free any nodes left on the temporary lists. */
1051 while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
1053 while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
1055 while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
1058 if (ctx.success == false)
/*
 * Fill "addr_array" (capacity "size") with the addresses of the chosen
 * list type: the primary MAC first for UC, or the broadcast address
 * first for MC (when broadcast is enabled), followed by the hash-table
 * entries; the primary MAC is skipped during the hash walk to avoid a
 * duplicate.
 */
1062 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
1063 u8 addr_array[][ETH_ALEN], int size)
1065 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1066 if_t ifp = priv->ifp;
1067 struct mlx5e_eth_addr_hash_node *hn;
1068 struct mlx5e_eth_addr_hash_head *addr_list;
1069 struct mlx5e_eth_addr_hash_node *tmp;
1073 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1075 if (is_uc) /* Make sure our own address is pushed first */
1076 ether_addr_copy(addr_array[i++], if_getlladdr(ifp));
1077 else if (priv->eth_addr.broadcast_enabled)
1078 ether_addr_copy(addr_array[i++], if_getbroadcastaddr(ifp));
1080 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
/* Primary MAC was already placed at index 0; skip it here. */
1081 if (ether_addr_equal(if_getlladdr(ifp), hn->ai.addr))
1085 ether_addr_copy(addr_array[i++], hn->ai.addr);
/*
 * Push the UC or MC address list into the NIC vport context.  The list
 * is sized from the hash table (plus broadcast for MC), clamped to the
 * device's log_max_current_{uc,mc}_list capability with a warning when
 * addresses are dropped.
 */
1089 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
1092 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1093 struct mlx5e_eth_addr_hash_node *hn;
1094 u8 (*addr_array)[ETH_ALEN] = NULL;
1095 struct mlx5e_eth_addr_hash_head *addr_list;
1096 struct mlx5e_eth_addr_hash_node *tmp;
/* MC list reserves one slot for the broadcast address. */
1102 size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
1104 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
1105 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
1107 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1108 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
1111 if (size > max_size) {
1112 mlx5_en_err(priv->ifp,
1113 "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
1114 is_uc ? "UC" : "MC", size, max_size);
1119 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
1124 mlx5e_fill_addr_array(priv, list_type, addr_array, size);
1127 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
1130 mlx5_en_err(priv->ifp,
1131 "Failed to modify vport %s list err(%d)\n",
1132 is_uc ? "UC" : "MC", err);
/*
 * Refresh all vport-context state: UC and MC address lists plus the
 * promiscuous/allmulti flags.
 */
1136 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
1138 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1140 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
1141 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
1142 mlx5_modify_nic_vport_promisc(priv->mdev, 0,
1143 ea->allmulti_enabled,
1144 ea->promisc_enabled);
/* Execute the pending ADD/DEL action of every UC and MC hash node. */
1148 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
1150 struct mlx5e_eth_addr_hash_node *hn;
1151 struct mlx5e_eth_addr_hash_node *tmp;
1154 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1155 mlx5e_execute_action(priv, hn);
1157 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1158 mlx5e_execute_action(priv, hn);
/*
 * Reconcile driver state with the ifnet's address lists: mark every
 * known address for deletion, re-sync from the ifnet (which cancels
 * DEL for still-present addresses and adds new ones), then apply the
 * resulting actions.  How rx_mode_enable gates this is not fully
 * visible in this view.
 */
1162 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv, bool rx_mode_enable)
1164 struct mlx5e_eth_addr_hash_node *hn;
1165 struct mlx5e_eth_addr_hash_node *tmp;
1168 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1169 hn->action = MLX5E_ACTION_DEL;
1170 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1171 hn->action = MLX5E_ACTION_DEL;
1174 mlx5e_sync_ifp_addr(priv);
1176 mlx5e_apply_ifp_addr(priv);
/*
 * Bring the receive filtering state (promiscuous, allmulti, broadcast,
 * and the per-address rules) in line with the interface flags.  Edge
 * transitions are computed against the cached state in priv->eth_addr
 * so rules are only added/removed on change; the vport context is
 * updated last.
 */
1180 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
1182 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1183 if_t ndev = priv->ifp;
1184 int ndev_flags = if_getflags(ndev);
1186 bool promisc_enabled = rx_mode_enable && (ndev_flags & IFF_PROMISC);
1187 bool allmulti_enabled = rx_mode_enable && (ndev_flags & IFF_ALLMULTI);
1188 bool broadcast_enabled = rx_mode_enable;
/* Edge detection: act only on transitions, not steady state. */
1190 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
1191 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
1192 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
1193 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
1194 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
1195 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
1197 /* update broadcast address */
1198 ether_addr_copy(priv->eth_addr.broadcast.addr,
1199 if_getbroadcastaddr(priv->ifp));
1201 if (enable_promisc) {
1202 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
/* Promiscuous mode implies accepting any VID, unless already done. */
1203 if (!priv->vlan.filter_disabled)
1204 mlx5e_add_any_vid_rules(priv);
1206 if (enable_allmulti)
1207 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
1208 if (enable_broadcast)
1209 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
1211 mlx5e_handle_ifp_addr(priv, rx_mode_enable);
1213 if (disable_broadcast)
1214 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
1215 if (disable_allmulti)
1216 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
1217 if (disable_promisc) {
1218 if (!priv->vlan.filter_disabled)
1219 mlx5e_del_any_vid_rules(priv);
1220 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
/* Cache the new state for the next edge computation. */
1223 ea->promisc_enabled = promisc_enabled;
1224 ea->allmulti_enabled = allmulti_enabled;
1225 ea->broadcast_enabled = broadcast_enabled;
1227 mlx5e_vport_context_update(priv);
/*
 * Deferred-work entry point: re-apply the RX mode when the flow rules
 * are ready (locking around the call is not visible in this view).
 */
1231 mlx5e_set_rx_mode_work(struct work_struct *work)
1233 struct mlx5e_priv *priv =
1234 container_of(work, struct mlx5e_priv, set_rx_mode_work);
1237 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1238 mlx5e_set_rx_mode_core(priv, true);
/* Destroy all successfully-created flow groups of a table, last first. */
1243 mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
1247 for (i = ft->num_groups - 1; i >= 0; i--) {
1248 if (!IS_ERR_OR_NULL(ft->g[i]))
1249 mlx5_destroy_flow_group(ft->g[i]);
/* Destroy a flow table: its groups first, then the table itself. */
1256 mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
1258 mlx5e_destroy_groups(ft);
1260 mlx5_destroy_flow_table(ft->t);
/*
 * Flow-group layout of the main table: ten groups of decreasing match
 * specificity (tunnel groups first); the table size is the sum of all
 * group sizes.
 */
1264 #define MLX5E_NUM_MAIN_GROUPS 10
1265 #define MLX5E_MAIN_GROUP0_SIZE BIT(4)
1266 #define MLX5E_MAIN_GROUP1_SIZE BIT(3)
1267 #define MLX5E_MAIN_GROUP2_SIZE BIT(1)
1268 #define MLX5E_MAIN_GROUP3_SIZE BIT(0)
1269 #define MLX5E_MAIN_GROUP4_SIZE BIT(14)
1270 #define MLX5E_MAIN_GROUP5_SIZE BIT(13)
1271 #define MLX5E_MAIN_GROUP6_SIZE BIT(11)
1272 #define MLX5E_MAIN_GROUP7_SIZE BIT(2)
1273 #define MLX5E_MAIN_GROUP8_SIZE BIT(1)
1274 #define MLX5E_MAIN_GROUP9_SIZE BIT(0)
1275 #define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\
1276 MLX5E_MAIN_GROUP1_SIZE +\
1277 MLX5E_MAIN_GROUP2_SIZE +\
1278 MLX5E_MAIN_GROUP3_SIZE +\
1279 MLX5E_MAIN_GROUP4_SIZE +\
1280 MLX5E_MAIN_GROUP5_SIZE +\
1281 MLX5E_MAIN_GROUP6_SIZE +\
1282 MLX5E_MAIN_GROUP7_SIZE +\
1283 MLX5E_MAIN_GROUP8_SIZE +\
1284 MLX5E_MAIN_GROUP9_SIZE +\
/*
 * Create the ten flow groups of the main flow table, from most to
 * least specific match criteria.  The tunnel group (outer ethertype +
 * ip_protocol + udp_dport) must be created first, per the comment
 * below.  On any failure control jumps to the error tail (label line
 * elided in this listing), which records PTR_ERR() from the failing
 * slot, clears it, and destroys all previously created groups.
 * NOTE(review): several lines are elided, e.g. the dmac byte setup
 * before groups 7-9 (lines 1386/1398/1409) -- verify against the
 * full file before relying on the exact match criteria there.
 */
1288 mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1291 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1292 u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
1293 match_criteria.outer_headers.dmac_47_16);
1297 /* Tunnel rules need to be first in this list of groups */
1299 /* Start tunnel rules */
/* group 0: outer ethertype + ip_protocol + udp_dport (VXLAN match) */
1300 memset(in, 0, inlen);
1301 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1302 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1303 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1304 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1305 MLX5_SET_CFG(in, start_flow_index, ix);
1306 ix += MLX5E_MAIN_GROUP0_SIZE;
1307 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1308 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1309 if (IS_ERR(ft->g[ft->num_groups]))
1310 goto err_destory_groups;
1312 /* End Tunnel Rules */
/* group 1: outer ethertype + ip_protocol */
1314 memset(in, 0, inlen);
1315 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1316 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1317 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1318 MLX5_SET_CFG(in, start_flow_index, ix);
1319 ix += MLX5E_MAIN_GROUP1_SIZE;
1320 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1321 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1322 if (IS_ERR(ft->g[ft->num_groups]))
1323 goto err_destory_groups;
/* group 2: outer ethertype only */
1326 memset(in, 0, inlen);
1327 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1328 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1329 MLX5_SET_CFG(in, start_flow_index, ix);
1330 ix += MLX5E_MAIN_GROUP2_SIZE;
1331 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1332 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1333 if (IS_ERR(ft->g[ft->num_groups]))
1334 goto err_destory_groups;
/* group 3: no match criteria (catch-all) */
1337 memset(in, 0, inlen);
1338 MLX5_SET_CFG(in, start_flow_index, ix);
1339 ix += MLX5E_MAIN_GROUP3_SIZE;
1340 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1341 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1342 if (IS_ERR(ft->g[ft->num_groups]))
1343 goto err_destory_groups;
/* group 4: full dmac + ethertype + ip_protocol (unicast L3 steering) */
1346 memset(in, 0, inlen);
1347 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1348 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1349 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1350 memset(dmac, 0xff, ETH_ALEN);
1351 MLX5_SET_CFG(in, start_flow_index, ix);
1352 ix += MLX5E_MAIN_GROUP4_SIZE;
1353 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1354 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1355 if (IS_ERR(ft->g[ft->num_groups]))
1356 goto err_destory_groups;
/* group 5: full dmac + ethertype */
1359 memset(in, 0, inlen);
1360 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1361 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1362 memset(dmac, 0xff, ETH_ALEN);
1363 MLX5_SET_CFG(in, start_flow_index, ix);
1364 ix += MLX5E_MAIN_GROUP5_SIZE;
1365 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1366 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1367 if (IS_ERR(ft->g[ft->num_groups]))
1368 goto err_destory_groups;
/* group 6: full dmac only */
1371 memset(in, 0, inlen);
1372 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1373 memset(dmac, 0xff, ETH_ALEN);
1374 MLX5_SET_CFG(in, start_flow_index, ix);
1375 ix += MLX5E_MAIN_GROUP6_SIZE;
1376 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1377 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1378 if (IS_ERR(ft->g[ft->num_groups]))
1379 goto err_destory_groups;
/*
 * group 7: ethertype + ip_protocol; an elided line (1386) presumably
 * also restricts dmac (multicast bit) -- TODO confirm.
 */
1382 memset(in, 0, inlen);
1383 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1384 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1385 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1387 MLX5_SET_CFG(in, start_flow_index, ix);
1388 ix += MLX5E_MAIN_GROUP7_SIZE;
1389 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1390 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1391 if (IS_ERR(ft->g[ft->num_groups]))
1392 goto err_destory_groups;
/* group 8: ethertype (plus elided dmac setup at line 1398) */
1395 memset(in, 0, inlen);
1396 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1397 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1399 MLX5_SET_CFG(in, start_flow_index, ix);
1400 ix += MLX5E_MAIN_GROUP8_SIZE;
1401 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1402 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1403 if (IS_ERR(ft->g[ft->num_groups]))
1404 goto err_destory_groups;
/* group 9: last group (elided dmac setup at line 1409) */
1407 memset(in, 0, inlen);
1408 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1410 MLX5_SET_CFG(in, start_flow_index, ix);
1411 ix += MLX5E_MAIN_GROUP9_SIZE;
1412 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1413 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1414 if (IS_ERR(ft->g[ft->num_groups]))
1415 goto err_destory_groups;
/* error tail (label line elided): clean the failing slot, undo all */
1421 err = PTR_ERR(ft->g[ft->num_groups]);
1422 ft->g[ft->num_groups] = NULL;
1423 mlx5e_destroy_groups(ft);
/*
 * Allocate a scratch create_flow_group_in buffer and create the main
 * table's groups via mlx5e_create_main_groups_sub(); the buffer free
 * and return are elided in this listing.
 */
1429 mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
1432 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1435 in = mlx5_vzalloc(inlen);
1439 err = mlx5e_create_main_groups_sub(ft, in, inlen);
/* Group sizes for the inner-header "vxlan_main" flow table (3 groups). */
1445 #define MLX5E_MAIN_VXLAN_GROUP0_SIZE BIT(3)
1446 #define MLX5E_MAIN_VXLAN_GROUP1_SIZE BIT(3)
1447 #define MLX5E_MAIN_VXLAN_GROUP2_SIZE BIT(0)
/*
 * Create the three groups of the vxlan_main table, matching on INNER
 * headers (the decapsulated packet): inner ethertype+ip_protocol,
 * inner ethertype, and a final catch-all.  Error tail mirrors
 * mlx5e_create_main_groups_sub().
 */
1449 mlx5e_create_main_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1452 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
/* group 0: inner ethertype + ip_protocol */
1456 memset(in, 0, inlen);
1457 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1458 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1459 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1460 MLX5_SET_CFG(in, start_flow_index, ix);
1461 ix += MLX5E_MAIN_VXLAN_GROUP0_SIZE;
1462 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1463 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1464 if (IS_ERR(ft->g[ft->num_groups]))
1465 goto err_destory_groups;
/* group 1: inner ethertype only */
1468 memset(in, 0, inlen);
1469 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1470 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1471 MLX5_SET_CFG(in, start_flow_index, ix);
1472 ix += MLX5E_MAIN_VXLAN_GROUP1_SIZE;
1473 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1474 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1475 if (IS_ERR(ft->g[ft->num_groups]))
1476 goto err_destory_groups;
/* group 2: catch-all */
1479 memset(in, 0, inlen);
1480 MLX5_SET_CFG(in, start_flow_index, ix);
1481 ix += MLX5E_MAIN_VXLAN_GROUP2_SIZE;
1482 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1483 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1484 if (IS_ERR(ft->g[ft->num_groups]))
1485 goto err_destory_groups;
/* error tail (label line elided) */
1491 err = PTR_ERR(ft->g[ft->num_groups]);
1492 ft->g[ft->num_groups] = NULL;
1493 mlx5e_destroy_groups(ft);
/*
 * Wrapper: allocate the create_flow_group_in scratch buffer and build
 * the vxlan_main groups; buffer free and return are elided here.
 */
1499 mlx5e_create_main_vxlan_groups(struct mlx5e_flow_table *ft)
1502 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1505 in = mlx5_vzalloc(inlen);
1509 err = mlx5e_create_main_vxlan_groups_sub(ft, in, inlen);
/*
 * Create either the outer "main" or the inner "vxlan_main" flow table
 * (selected by inner_vxlan), then its group array and groups.  Both
 * variants are sized with MLX5E_MAIN_TABLE_SIZE.  On group-array or
 * group creation failure the table is destroyed via the error label.
 */
1517 mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
1519 struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
1524 ft->t = mlx5_create_flow_table(priv->fts.ns, 0,
1525 inner_vxlan ? "vxlan_main" : "main", MLX5E_MAIN_TABLE_SIZE);
1527 if (IS_ERR(ft->t)) {
1528 err = PTR_ERR(ft->t);
1532 ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
/* NOTE(review): kcalloc NULL check and err assignment elided here */
1535 goto err_destroy_main_flow_table;
1538 err = inner_vxlan ? mlx5e_create_main_vxlan_groups(ft) :
1539 mlx5e_create_main_groups(ft);
1547 err_destroy_main_flow_table:
1548 mlx5_destroy_flow_table(ft->t);
/* Destroy the outer-header main flow table. */
1554 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
1556 mlx5e_destroy_flow_table(&priv->fts.main);
/* Destroy the inner-header (post-decap) vxlan_main flow table. */
1559 static void mlx5e_destroy_main_vxlan_flow_table(struct mlx5e_priv *priv)
1561 mlx5e_destroy_flow_table(&priv->fts.main_vxlan)
/*
 * VLAN flow table layout: group 0 matches C-VLAN tag + VID, group 1
 * any C-VLAN tag, group 2 any S-VLAN tag.
 * NOTE(review): the closing term of MLX5E_VLAN_TABLE_SIZE (past line
 * 1570) is elided in this listing.
 */
1564 #define MLX5E_NUM_VLAN_GROUPS 3
1565 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
1566 #define MLX5E_VLAN_GROUP1_SIZE BIT(1)
1567 #define MLX5E_VLAN_GROUP2_SIZE BIT(0)
1568 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
1569 MLX5E_VLAN_GROUP1_SIZE +\
1570 MLX5E_VLAN_GROUP2_SIZE +\
/*
 * Create the three VLAN table groups: (cvlan_tag, first_vid),
 * (cvlan_tag), (svlan_tag).  Error tail mirrors the other *_sub
 * group builders: record PTR_ERR, clear the slot, destroy groups.
 */
1574 mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1579 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
/* group 0: C-VLAN tag + VID */
1581 memset(in, 0, inlen);
1582 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1583 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1584 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1585 MLX5_SET_CFG(in, start_flow_index, ix);
1586 ix += MLX5E_VLAN_GROUP0_SIZE;
1587 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1588 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1589 if (IS_ERR(ft->g[ft->num_groups]))
1590 goto err_destory_groups;
/* group 1: any C-VLAN tag */
1593 memset(in, 0, inlen);
1594 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1595 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1596 MLX5_SET_CFG(in, start_flow_index, ix);
1597 ix += MLX5E_VLAN_GROUP1_SIZE;
1598 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1599 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1600 if (IS_ERR(ft->g[ft->num_groups]))
1601 goto err_destory_groups;
/* group 2: any S-VLAN tag */
1604 memset(in, 0, inlen);
1605 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1606 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1607 MLX5_SET_CFG(in, start_flow_index, ix);
1608 ix += MLX5E_VLAN_GROUP2_SIZE;
1609 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1610 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1611 if (IS_ERR(ft->g[ft->num_groups]))
1612 goto err_destory_groups;
/* error tail (label line elided) */
1618 err = PTR_ERR(ft->g[ft->num_groups]);
1619 ft->g[ft->num_groups] = NULL;
1620 mlx5e_destroy_groups(ft);
/*
 * Wrapper: allocate the scratch buffer and build the VLAN groups;
 * buffer free and return are elided in this listing.
 */
1626 mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
1629 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1632 in = mlx5_vzalloc(inlen);
1636 err = mlx5e_create_vlan_groups_sub(ft, in, inlen);
/*
 * Create the "vlan" flow table, its group-pointer array and groups.
 * On failure after table creation the table is destroyed via the
 * error label.
 */
1643 mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
1645 struct mlx5e_flow_table *ft = &priv->fts.vlan;
1649 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
1650 MLX5E_VLAN_TABLE_SIZE);
1652 if (IS_ERR(ft->t)) {
1653 err = PTR_ERR(ft->t);
1657 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
/* NOTE(review): NULL check and err assignment elided here */
1660 goto err_destroy_vlan_flow_table;
1663 err = mlx5e_create_vlan_groups(ft);
1672 err_destroy_vlan_flow_table:
1673 mlx5_destroy_flow_table(ft->t);
/* Destroy the VLAN flow table. */
1680 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
1682 mlx5e_destroy_flow_table(&priv->fts.vlan);
/*
 * Install one VXLAN steering rule in the vxlan table: match outer
 * ethertype == el->proto, ip_protocol == UDP and udp_dport ==
 * el->port, tag the flow and forward matching packets to the
 * inner-header vxlan_main table.  The rule handle is stored in
 * el->vxlan_ft_rule; returns PTR_ERR on failure.
 */
1686 mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
1687 struct mlx5e_vxlan_db_el *el)
1689 struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1690 struct mlx5_flow_destination dest = {};
1692 struct mlx5_flow_rule **rule_p;
1694 struct mlx5_flow_act flow_act = {
1695 .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
1696 .flow_tag = MLX5_FS_ETH_FLOW_TAG,
1699 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1700 dest.ft = priv->fts.main_vxlan.t;
1702 mc_enable = MLX5_MATCH_OUTER_HEADERS;
1703 rule_p = &el->vxlan_ft_rule;
1704 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1705 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
1706 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1707 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
1708 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1709 MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
1711 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1712 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
1714 if (IS_ERR(*rule_p)) {
1715 err = PTR_ERR(*rule_p);
1717 mlx5_en_err(priv->ifp, "add rule failed\n");
/*
 * Look up a VXLAN database entry by (ethertype, UDP port) in the
 * per-priv tailq; returns the entry or (elided) NULL if absent.
 */
1723 static struct mlx5e_vxlan_db_el *
1724 mlx5e_vxlan_find_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1726 struct mlx5e_vxlan_db_el *el;
1728 TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1729 if (el->proto == proto && el->port == port)
/*
 * Allocate and zero a new VXLAN database entry; field initialization
 * (proto/port/refcount) is elided in this listing apart from the
 * NULL rule handle.
 */
1735 static struct mlx5e_vxlan_db_el *
1736 mlx5e_vxlan_alloc_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1738 struct mlx5e_vxlan_db_el *el;
1740 el = mlx5_vzalloc(sizeof(*el));
1744 el->vxlan_ft_rule = NULL;
/*
 * Map a socket address family to the matching Ethernet ethertype
 * (AF_INET -> ETHERTYPE_IP, AF_INET6 -> ETHERTYPE_IPV6); the switch
 * scaffolding and default/error case are elided in this listing.
 */
1749 mlx5e_vxlan_family_to_proto(sa_family_t family, u_int *proto)
1753 *proto = ETHERTYPE_IP;
1756 *proto = ETHERTYPE_IPV6;
/*
 * Allocate match-criteria/value scratch buffers, install the VXLAN
 * rule for one DB entry via mlx5e_add_vxlan_rule_sub(), then free the
 * buffers.  Both allocations are checked together; on failure the
 * error path (err assignment elided) falls through to the frees.
 */
1764 mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
1765 struct mlx5e_vxlan_db_el *el)
1767 u32 *match_criteria;
1771 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1772 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1773 if (match_value == NULL || match_criteria == NULL) {
1774 mlx5_en_err(priv->ifp, "alloc failed\n");
1776 goto add_vxlan_rule_out;
1779 err = mlx5e_add_vxlan_rule_sub(priv, match_criteria, match_value, el);
/* kvfree(NULL) is safe, so the shared exit frees both buffers */
1782 kvfree(match_criteria);
1783 kvfree(match_value);
/*
 * Register a VXLAN UDP port: reuse an existing DB entry if present
 * (refcount handling elided in this listing -- TODO confirm), else
 * allocate one; install the hardware rule only when VXLAN HW checksum
 * offload is enabled, then link the entry into the DB.
 */
1789 mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1791 struct mlx5e_vxlan_db_el *el;
1795 err = mlx5e_vxlan_family_to_proto(family, &proto);
1799 el = mlx5e_vxlan_find_db_el(priv, proto, port);
1805 el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
1807 if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
1808 err = mlx5e_add_vxlan_rule_from_db(priv, el);
1810 el->installed = true;
1813 TAILQ_INSERT_TAIL(&priv->vxlan.head, el, link);
/*
 * Install the vxlan table catch-all rule: any packet not matching a
 * registered VXLAN port is tagged and forwarded to the outer-header
 * main table.  mc_enable is left at its (elided) initial value, i.e.
 * no match criteria -- presumably zero; TODO confirm.
 */
1821 mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
1823 struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1824 struct mlx5_flow_destination dest = {};
1826 struct mlx5_flow_rule **rule_p;
1828 struct mlx5_flow_act flow_act = {
1829 .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
1830 .flow_tag = MLX5_FS_ETH_FLOW_TAG,
1833 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1834 dest.ft = priv->fts.main.t;
1836 rule_p = &priv->fts.vxlan_catchall_ft_rule;
1837 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1838 MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
1840 if (IS_ERR(*rule_p)) {
1841 err = PTR_ERR(*rule_p);
1843 mlx5_en_err(priv->ifp, "add rule failed\n");
/*
 * Allocate scratch match buffers, install the vxlan catch-all rule,
 * and free the buffers.  Mirrors mlx5e_add_vxlan_rule_from_db().
 */
1851 mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
1853 u32 *match_criteria;
1857 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1858 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1859 if (match_value == NULL || match_criteria == NULL) {
1860 mlx5_en_err(priv->ifp, "alloc failed\n");
1862 goto add_vxlan_rule_out;
1865 err = mlx5e_add_vxlan_catchall_rule_sub(priv, match_criteria,
1869 kvfree(match_criteria);
1870 kvfree(match_value);
/*
 * (Re-)install the hardware rule for every VXLAN DB entry, marking
 * each as installed; the skip condition for already-installed entries
 * and error handling are elided in this listing.
 */
1876 mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv)
1878 struct mlx5e_vxlan_db_el *el;
1882 TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1885 err = mlx5e_add_vxlan_rule_from_db(priv, el);
1888 el->installed = true;
/*
 * Unregister a VXLAN UDP port: if the DB entry is shared (refcount >
 * 1) only the (elided) refcount is dropped; otherwise the hardware
 * rule is deleted and the entry unlinked (free elided).
 */
1895 mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1897 struct mlx5e_vxlan_db_el *el;
1901 err = mlx5e_vxlan_family_to_proto(family, &proto);
1905 el = mlx5e_vxlan_find_db_el(priv, proto, port);
1908 if (el->refcount > 1) {
1914 mlx5_del_flow_rule(&el->vxlan_ft_rule);
1915 TAILQ_REMOVE(&priv->vxlan.head, el, link);
/*
 * Remove the hardware rules of all VXLAN DB entries, keeping the DB
 * entries themselves so mlx5e_add_all_vxlan_rules() can restore them.
 */
1921 mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
1923 struct mlx5e_vxlan_db_el *el;
1925 TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1928 mlx5_del_flow_rule(&el->vxlan_ft_rule);
1929 el->installed = false;
/* Remove the vxlan table catch-all rule. */
1934 mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
1936 mlx5_del_flow_rule(&priv->fts.vxlan_catchall_ft_rule);
/*
 * VXLAN port-registration event handler: tell firmware about the new
 * UDP port and, if the flow rules are up, install the steering rule.
 * Locking around the calls is elided in this listing.
 */
1940 mlx5e_vxlan_start(void *arg, if_t ifp __unused, sa_family_t family,
1943 struct mlx5e_priv *priv = arg;
1947 err = mlx5_vxlan_udp_port_add(priv->mdev, port);
1948 if (err == 0 && test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1949 mlx5e_add_vxlan_rule(priv, family, port);
/*
 * VXLAN port-removal event handler: delete the steering rule (when
 * flow rules are up), then unregister the UDP port with firmware;
 * the port-delete result is deliberately ignored.
 */
1954 mlx5e_vxlan_stop(void *arg, if_t ifp __unused, sa_family_t family,
1957 struct mlx5e_priv *priv = arg;
1960 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1961 mlx5e_del_vxlan_rule(priv, family, port);
1962 (void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
/*
 * VXLAN flow table layout: group 0 holds the per-port match rules,
 * group 1 the single catch-all entry.
 */
1966 #define MLX5E_VXLAN_GROUP0_SIZE BIT(3) /* XXXKIB */
1967 #define MLX5E_VXLAN_GROUP1_SIZE BIT(0)
1968 #define MLX5E_NUM_VXLAN_GROUPS BIT(1)
1969 #define MLX5E_VXLAN_TABLE_SIZE \
1970 (MLX5E_VXLAN_GROUP0_SIZE + MLX5E_VXLAN_GROUP1_SIZE)
/*
 * Create the two vxlan table groups: group 0 matches outer ethertype
 * + ip_protocol + udp_dport (per-port VXLAN rules), group 1 is the
 * catch-all.  Error tail mirrors the other *_sub group builders.
 */
1973 mlx5e_create_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1978 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
/* group 0: ethertype + ip_protocol + udp_dport */
1980 memset(in, 0, inlen);
1981 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1982 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1983 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1984 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1985 MLX5_SET_CFG(in, start_flow_index, ix);
1986 ix += MLX5E_VXLAN_GROUP0_SIZE;
1987 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1988 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1989 if (IS_ERR(ft->g[ft->num_groups]))
1990 goto err_destory_groups;
/* group 1: catch-all */
1993 memset(in, 0, inlen);
1994 MLX5_SET_CFG(in, start_flow_index, ix);
1995 ix += MLX5E_VXLAN_GROUP1_SIZE;
1996 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1997 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1998 if (IS_ERR(ft->g[ft->num_groups]))
1999 goto err_destory_groups;
/* error tail (label line elided) */
2005 err = PTR_ERR(ft->g[ft->num_groups]);
2006 ft->g[ft->num_groups] = NULL;
2007 mlx5e_destroy_groups(ft);
/*
 * Wrapper: allocate the scratch buffer and build the vxlan groups;
 * buffer free and return are elided in this listing.
 */
2013 mlx5e_create_vxlan_groups(struct mlx5e_flow_table *ft)
2016 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2019 in = mlx5_vzalloc(inlen);
2023 err = mlx5e_create_vxlan_groups_sub(ft, in, inlen);
/*
 * Create the "vxlan" flow table, its group array and groups, and
 * initialize the VXLAN port database tailq.  On failure after table
 * creation the table is destroyed via the error label.
 */
2030 mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
2032 struct mlx5e_flow_table *ft = &priv->fts.vxlan;
2036 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vxlan",
2037 MLX5E_VXLAN_TABLE_SIZE);
2039 if (IS_ERR(ft->t)) {
2040 err = PTR_ERR(ft->t);
2044 ft->g = kcalloc(MLX5E_NUM_VXLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
/* NOTE(review): NULL check and err assignment elided here */
2047 goto err_destroy_vxlan_flow_table;
2050 err = mlx5e_create_vxlan_groups(ft);
2054 TAILQ_INIT(&priv->vxlan.head);
2060 err_destroy_vxlan_flow_table:
2061 mlx5_destroy_flow_table(ft->t);
/*
 * Inner-RSS flow table layout: three groups keyed on inner headers.
 * NOTE(review): the closing term of MLX5E_INNER_RSS_TABLE_SIZE (past
 * line 2073) is elided in this listing.
 */
2067 #define MLX5E_NUM_INNER_RSS_GROUPS 3
2068 #define MLX5E_INNER_RSS_GROUP0_SIZE BIT(3)
2069 #define MLX5E_INNER_RSS_GROUP1_SIZE BIT(1)
2070 #define MLX5E_INNER_RSS_GROUP2_SIZE BIT(0)
2071 #define MLX5E_INNER_RSS_TABLE_SIZE (MLX5E_INNER_RSS_GROUP0_SIZE +\
2072 MLX5E_INNER_RSS_GROUP1_SIZE +\
2073 MLX5E_INNER_RSS_GROUP2_SIZE +\
/*
 * Create the three inner-RSS groups, matching on INNER headers:
 * ethertype+ip_protocol, ethertype, catch-all.  Same structure and
 * error tail as mlx5e_create_main_vxlan_groups_sub().
 */
2077 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
2080 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
/* group 0: inner ethertype + ip_protocol */
2084 memset(in, 0, inlen);
2085 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2086 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2087 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
2088 MLX5_SET_CFG(in, start_flow_index, ix);
2089 ix += MLX5E_INNER_RSS_GROUP0_SIZE;
2090 MLX5_SET_CFG(in, end_flow_index, ix - 1);
2091 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2092 if (IS_ERR(ft->g[ft->num_groups]))
2093 goto err_destory_groups;
/* group 1: inner ethertype only */
2096 memset(in, 0, inlen);
2097 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2098 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2099 MLX5_SET_CFG(in, start_flow_index, ix);
2100 ix += MLX5E_INNER_RSS_GROUP1_SIZE;
2101 MLX5_SET_CFG(in, end_flow_index, ix - 1);
2102 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2103 if (IS_ERR(ft->g[ft->num_groups]))
2104 goto err_destory_groups;
/* group 2: catch-all */
2107 memset(in, 0, inlen);
2108 MLX5_SET_CFG(in, start_flow_index, ix);
2109 ix += MLX5E_INNER_RSS_GROUP2_SIZE;
2110 MLX5_SET_CFG(in, end_flow_index, ix - 1);
2111 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2112 if (IS_ERR(ft->g[ft->num_groups]))
2113 goto err_destory_groups;
/* error tail (label line elided) */
2119 err = PTR_ERR(ft->g[ft->num_groups]);
2120 ft->g[ft->num_groups] = NULL;
2121 mlx5e_destroy_groups(ft);
/*
 * Wrapper: allocate the scratch buffer and build the inner-RSS
 * groups; buffer free and return are elided in this listing.
 */
2127 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
2130 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2133 in = mlx5_vzalloc(inlen);
2137 err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);
/*
 * Create the "inner_rss" flow table, its group array and groups.  On
 * failure after table creation the table is destroyed via the error
 * label.
 */
2144 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
2146 struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
2150 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
2151 MLX5E_INNER_RSS_TABLE_SIZE);
2153 if (IS_ERR(ft->t)) {
2154 err = PTR_ERR(ft->t);
2158 ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
/* NOTE(review): NULL check and err assignment elided here */
2162 goto err_destroy_inner_rss_flow_table;
2165 err = mlx5e_create_inner_rss_groups(ft);
2174 err_destroy_inner_rss_flow_table:
2175 mlx5_destroy_flow_table(ft->t);
/* Destroy the inner-RSS flow table. */
2181 static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
2183 mlx5e_destroy_flow_table(&priv->fts.inner_rss);
/* Destroy the VXLAN flow table. */
2187 mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
2189 mlx5e_destroy_flow_table(&priv->fts.vxlan);
/*
 * Build all receive flow tables in dependency order: vlan -> vxlan ->
 * main_vxlan (inner) -> inner_rss -> main (outer) -> vxlan catch-all
 * rule -> accelerated TCP (TLS RX) tables.  The goto ladder unwinds
 * the already-created tables in exact reverse order on failure.
 * Mirrors mlx5e_close_flow_tables().
 */
2193 mlx5e_open_flow_tables(struct mlx5e_priv *priv)
2197 /* setup namespace pointer */
2198 priv->fts.ns = mlx5_get_flow_namespace(
2199 priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
2201 err = mlx5e_create_vlan_flow_table(priv);
2205 err = mlx5e_create_vxlan_flow_table(priv);
2207 goto err_destroy_vlan_flow_table;
2209 err = mlx5e_create_main_flow_table(priv, true);
2211 goto err_destroy_vxlan_flow_table;
2213 err = mlx5e_create_inner_rss_flow_table(priv);
2215 goto err_destroy_main_flow_table_true;
2217 err = mlx5e_create_main_flow_table(priv, false);
2219 goto err_destroy_inner_rss_flow_table;
2221 err = mlx5e_add_vxlan_catchall_rule(priv);
2223 goto err_destroy_main_flow_table_false;
2225 err = mlx5e_accel_fs_tcp_create(priv);
2227 goto err_del_vxlan_catchall_rule;
2231 err_del_vxlan_catchall_rule:
2232 mlx5e_del_vxlan_catchall_rule(priv);
2233 err_destroy_main_flow_table_false:
2234 mlx5e_destroy_main_flow_table(priv);
2235 err_destroy_inner_rss_flow_table:
2236 mlx5e_destroy_inner_rss_flow_table(priv);
2237 err_destroy_main_flow_table_true:
2238 mlx5e_destroy_main_vxlan_flow_table(priv);
2239 err_destroy_vxlan_flow_table:
2240 mlx5e_destroy_vxlan_flow_table(priv);
2241 err_destroy_vlan_flow_table:
2242 mlx5e_destroy_vlan_flow_table(priv);
/*
 * Tear down all receive flow tables in strict reverse order of
 * mlx5e_open_flow_tables() so no table is destroyed while a rule in
 * another table still forwards to it.
 */
2248 mlx5e_close_flow_tables(struct mlx5e_priv *priv)
2250 mlx5e_accel_fs_tcp_destroy(priv);
2251 mlx5e_del_vxlan_catchall_rule(priv);
2252 mlx5e_destroy_main_flow_table(priv);
2253 mlx5e_destroy_inner_rss_flow_table(priv);
2254 mlx5e_destroy_main_vxlan_flow_table(priv);
2255 mlx5e_destroy_vxlan_flow_table(priv);
2256 mlx5e_destroy_vlan_flow_table(priv);
/*
 * Populate the flow tables with rules (vlan, main-vxlan, vxlan),
 * apply the current RX mode, and publish readiness by setting
 * MLX5E_STATE_FLOW_RULES_READY -- the bit tested by the RX mode work
 * and VXLAN start/stop handlers.  Unwinds in reverse on failure.
 */
2260 mlx5e_open_flow_rules(struct mlx5e_priv *priv)
2264 err = mlx5e_add_all_vlan_rules(priv);
2268 err = mlx5e_add_main_vxlan_rules(priv);
2270 goto err_del_all_vlan_rules;
2272 err = mlx5e_add_all_vxlan_rules(priv);
2274 goto err_del_main_vxlan_rules;
2276 mlx5e_set_rx_mode_core(priv, true);
2278 set_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2282 err_del_main_vxlan_rules:
2283 mlx5e_del_main_vxlan_rules(priv);
2285 err_del_all_vlan_rules:
2286 mlx5e_del_all_vlan_rules(priv);
/*
 * Remove all flow rules in reverse order of mlx5e_open_flow_rules(),
 * clearing MLX5E_STATE_FLOW_RULES_READY first so concurrent handlers
 * stop touching the rules.  (Function tail lies past this view.)
 */
2292 mlx5e_close_flow_rules(struct mlx5e_priv *priv)
2294 clear_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2296 mlx5e_set_rx_mode_core(priv, false);
2297 mlx5e_del_all_vxlan_rules(priv);
2298 mlx5e_del_main_vxlan_rules(priv);
2299 mlx5e_del_all_vlan_rules(priv);