2 * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include "opt_ratelimit.h"
29 #include <dev/mlx5/mlx5_en/en.h>
31 #include <linux/list.h>
32 #include <dev/mlx5/fs.h>
33 #include <dev/mlx5/mpfs.h>
34 #include <dev/mlx5/mlx5_core/fs_tcp.h>
37 * The flow tables with rules define the packet processing on receive.
38 * Currently the following structure is set up to handle different
39 * offloads like TLS RX offload, VLAN decapsulation, packet
40 * classification, RSS hashing, VxLAN checksum offloading:
42 * +=========+ +=========+ +=================+
43 * |TCP/IPv4 | |TCP/IPv4 | |TCP/IPv4 Match |
44 * |Flowtable|------>| |----->|Outer Proto Match|=====> TLS TIR n
45 * | | |Catch-all|\ | |
46 * +=========+ +=========+| +=================+
48 * +------------------------+
50 * +=========+ +=========+ +=================+
51 * |TCP/IPv6 | |TCP/IPv6 | |TCP/IPv6 Match |
52 * |Flowtable|------>| |----->|Outer Proto Match|=====> TLS TIR n
53 * | | |Catch-all|\ | |
54 * +=========+ +=========+| +=================+
56 * +------------------------+
58 * +=========+ +=========+ +=================+
59 * |VLAN ft: | |VxLAN | |VxLAN Main |
60 * |CTAG/STAG|------>| VNI|----->|Inner Proto Match|=====> Inner TIR n
61 * |VID/noVID|/ |Catch-all|\ | |
62 * +=========+ +=========+| +=================+
69 * |Outer Proto Match|=====> TIR n
73 * The path through flow rules directs each packet into an appropriate TIR,
75 * - VLAN encapsulation
77 * - Presence of inner protocol
/* Shorthand for setting a field in a create_flow_group_in command layout. */
80 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
96 MLX5E_ACTION_NONE = 0,
/*
 * Hash-table node tracking one L2 (unicast or multicast) address and
 * the flow-steering state attached to it.  NOTE(review): this extract
 * is missing lines (e.g. the `action`/`mpfs_index` fields referenced
 * elsewhere in this file) — confirm against the full source.
 */
101 struct mlx5e_eth_addr_hash_node {
/* linkage into one bucket of a mlx5e_eth_addr_hash_head list */
102 LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
/* per-address flow rules and the MAC itself (ai.addr) */
105 struct mlx5e_eth_addr_info ai;
108 static void mlx5e_del_all_vlan_rules(struct mlx5e_priv *);
111 mlx5e_hash_eth_addr(const u8 * addr)
/*
 * Insert a new address node into its hash bucket unless an entry with
 * the same MAC already exists.  On a duplicate, a pending DEL action is
 * cancelled (address is still in use) and the new node is freed.
 * NOTE(review): return statements are missing from this extract; the
 * function presumably reports whether hn_new was consumed — confirm.
 */
117 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
118 struct mlx5e_eth_addr_hash_node *hn_new)
120 struct mlx5e_eth_addr_hash_node *hn;
/* bucket index derived from the MAC address */
121 u32 ix = mlx5e_hash_eth_addr(hn_new->ai.addr);
123 LIST_FOREACH(hn, &hash[ix], hlist) {
124 if (bcmp(hn->ai.addr, hn_new->ai.addr, ETHER_ADDR_LEN) == 0) {
/* address re-appeared: keep the existing node alive */
125 if (hn->action == MLX5E_ACTION_DEL)
126 hn->action = MLX5E_ACTION_NONE;
127 free(hn_new, M_MLX5EN);
/* no duplicate found: take ownership of hn_new */
131 LIST_INSERT_HEAD(&hash[ix], hn_new, hlist);
/*
 * Unlink a node from its hash bucket.  NOTE(review): the extract ends
 * here; the original presumably also frees the node — confirm.
 */
136 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
138 LIST_REMOVE(hn, hlist);
/*
 * Tear down every per-traffic-type steering rule installed for this
 * address.  mlx5_del_flow_rule() takes the rule pointer by reference —
 * presumably so it can clear the slot and tolerate never-created rules;
 * confirm against the fs API.
 */
143 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
144 struct mlx5e_eth_addr_info *ai)
146 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
147 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
148 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
149 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
150 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
151 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
152 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
153 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
154 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6]);
155 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4]);
156 mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_ANY]);
/*
 * Classify a MAC address: unicast, IPv4 multicast (01:00:5e prefix),
 * IPv6 multicast (33:33 prefix), or other multicast.  NOTE(review):
 * extract is missing lines (the unicast return and the full prefix
 * comparisons) — confirm the exact prefix checks in the full source.
 */
160 mlx5e_get_eth_addr_type(const u8 * addr)
/* not multicast => plain unicast address */
162 if (ETHER_IS_MULTICAST(addr) == 0)
165 if ((addr[0] == 0x01) &&
169 return (MLX5E_MC_IPV4);
171 if ((addr[0] == 0x33) &&
173 return (MLX5E_MC_IPV6);
175 return (MLX5E_MC_OTHER);
/*
 * Build the bitmask of traffic types (MLX5E_TT_*) for which steering
 * rules must be created for this address, based on the match type
 * (FULLMATCH / ALLMULTI / PROMISC) and, for full matches, the address
 * class.  NOTE(review): case labels and several mask lines are missing
 * from this extract — the groupings below are partial.
 */
179 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
185 case MLX5E_FULLMATCH:
186 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
187 switch (eth_addr_type) {
/* unicast: all traffic types are relevant */
190 (1 << MLX5E_TT_IPV4_TCP) |
191 (1 << MLX5E_TT_IPV6_TCP) |
192 (1 << MLX5E_TT_IPV4_UDP) |
193 (1 << MLX5E_TT_IPV6_UDP) |
194 (1 << MLX5E_TT_IPV4) |
195 (1 << MLX5E_TT_IPV6) |
196 (1 << MLX5E_TT_ANY) |
/* IPv4 multicast: only UDP/raw IPv4 make sense */
202 (1 << MLX5E_TT_IPV4_UDP) |
203 (1 << MLX5E_TT_IPV4) |
/* IPv6 multicast: only UDP/raw IPv6 */
209 (1 << MLX5E_TT_IPV6_UDP) |
210 (1 << MLX5E_TT_IPV6) |
216 (1 << MLX5E_TT_ANY) |
/* allmulti: both IP families, UDP only, plus catch-all */
224 (1 << MLX5E_TT_IPV4_UDP) |
225 (1 << MLX5E_TT_IPV6_UDP) |
226 (1 << MLX5E_TT_IPV4) |
227 (1 << MLX5E_TT_IPV6) |
228 (1 << MLX5E_TT_ANY) |
232 default: /* MLX5E_PROMISC */
234 (1 << MLX5E_TT_IPV4_TCP) |
235 (1 << MLX5E_TT_IPV6_TCP) |
236 (1 << MLX5E_TT_IPV4_UDP) |
237 (1 << MLX5E_TT_IPV6_UDP) |
238 (1 << MLX5E_TT_IPV4) |
239 (1 << MLX5E_TT_IPV6) |
240 (1 << MLX5E_TT_ANY) |
/*
 * Program the main flow table with one forward-to-TIR rule per traffic
 * type selected by mlx5e_get_tt_vec().  `mc`/`mv` are caller-provided
 * match-criteria / match-value buffers that are refined incrementally:
 * first the DMAC (for FULLMATCH), then ethertype, then IP protocol.
 * On any rule-creation failure all rules added so far are torn down.
 * NOTE(review): this extract is missing lines (goto targets, breaks,
 * returns) — the error-path structure below is partial.
 */
249 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
250 struct mlx5e_eth_addr_info *ai, int type,
253 struct mlx5_flow_destination dest = {};
255 struct mlx5_flow_rule **rule_p;
256 struct mlx5_flow_table *ft = priv->fts.main.t;
257 u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
258 outer_headers.dmac_47_16);
259 u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
260 outer_headers.dmac_47_16);
261 u32 *tirn = priv->tirn;
/* every rule in this function forwards to a TIR */
265 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
268 case MLX5E_FULLMATCH:
/* exact destination-MAC match */
269 mc_enable = MLX5_MATCH_OUTER_HEADERS;
270 memset(mc_dmac, 0xff, ETH_ALEN);
271 ether_addr_copy(mv_dmac, ai->addr);
275 mc_enable = MLX5_MATCH_OUTER_HEADERS;
286 tt_vec = mlx5e_get_tt_vec(ai, type);
/* catch-all rule: no ethertype/protocol refinement yet */
288 if (tt_vec & BIT(MLX5E_TT_ANY)) {
289 rule_p = &ai->ft_rule[MLX5E_TT_ANY];
290 dest.tir_num = tirn[MLX5E_TT_ANY];
291 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
292 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
293 MLX5_FS_ETH_FLOW_TAG, &dest);
294 if (IS_ERR_OR_NULL(*rule_p))
/* from here on, also match on ethertype */
298 mc_enable = MLX5_MATCH_OUTER_HEADERS;
299 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
301 if (tt_vec & BIT(MLX5E_TT_IPV4)) {
302 rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
303 dest.tir_num = tirn[MLX5E_TT_IPV4];
304 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
306 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
307 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
308 MLX5_FS_ETH_FLOW_TAG, &dest);
309 if (IS_ERR_OR_NULL(*rule_p))
313 if (tt_vec & BIT(MLX5E_TT_IPV6)) {
314 rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
315 dest.tir_num = tirn[MLX5E_TT_IPV6];
316 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
318 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
319 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
320 MLX5_FS_ETH_FLOW_TAG, &dest);
321 if (IS_ERR_OR_NULL(*rule_p))
/* refine further: match IP protocol, starting with UDP */
325 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
326 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
328 if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
329 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
330 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
331 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
333 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
334 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
335 MLX5_FS_ETH_FLOW_TAG, &dest);
336 if (IS_ERR_OR_NULL(*rule_p))
340 if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
341 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
342 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
343 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
345 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
346 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
347 MLX5_FS_ETH_FLOW_TAG, &dest);
348 if (IS_ERR_OR_NULL(*rule_p))
/* switch protocol match to TCP */
352 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
354 if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
355 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
356 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
357 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
359 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
360 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
361 MLX5_FS_ETH_FLOW_TAG, &dest);
362 if (IS_ERR_OR_NULL(*rule_p))
366 if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
367 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
368 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
369 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
371 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
372 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
373 MLX5_FS_ETH_FLOW_TAG, &dest);
374 if (IS_ERR_OR_NULL(*rule_p))
/* IPsec AH */
378 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
380 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
381 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
382 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
383 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
385 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
386 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
387 MLX5_FS_ETH_FLOW_TAG, &dest);
388 if (IS_ERR_OR_NULL(*rule_p))
392 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
393 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
394 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
395 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
397 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
398 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
399 MLX5_FS_ETH_FLOW_TAG, &dest);
400 if (IS_ERR_OR_NULL(*rule_p))
/* IPsec ESP */
404 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
406 if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
407 rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
408 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
409 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
411 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
412 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
413 MLX5_FS_ETH_FLOW_TAG, &dest);
414 if (IS_ERR_OR_NULL(*rule_p))
418 if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
419 rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
420 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
421 MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
423 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
424 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
425 MLX5_FS_ETH_FLOW_TAG, &dest);
426 if (IS_ERR_OR_NULL(*rule_p))
/* error path: undo everything created above */
433 err = PTR_ERR(*rule_p);
435 mlx5e_del_eth_addr_from_flow_table(priv, ai);
/*
 * Wrapper around mlx5e_add_eth_addr_rule_sub(): allocates the two
 * zeroed fte_match_param scratch buffers, runs the rule setup, and
 * frees the buffers on all paths (kvfree(NULL) is a no-op).
 */
441 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
442 struct mlx5e_eth_addr_info *ai, int type)
448 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
449 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
450 if (!match_value || !match_criteria) {
451 mlx5_en_err(priv->ifp, "alloc failed\n");
453 goto add_eth_addr_rule_out;
455 err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
458 add_eth_addr_rule_out:
459 kvfree(match_criteria);
/*
 * Remove every inner-header steering rule of the main VxLAN table,
 * one per traffic type; the catch-all (TT_ANY) goes last.
 */
466 mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
468 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
469 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
470 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
471 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
472 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
473 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
474 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
475 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
476 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
477 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
478 mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
/*
 * Populate the main VxLAN flow table: match on INNER headers
 * (ethertype, then inner IP protocol) and forward each traffic type
 * to its inner-VxLAN TIR.  The final catch-all rule clears both match
 * buffers so it matches anything not claimed above.  On failure, all
 * rules created so far are removed.  NOTE(review): goto/return lines
 * are missing from this extract — error-path structure is partial.
 */
482 mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
484 struct mlx5_flow_destination dest = {};
486 struct mlx5_flow_rule **rule_p;
487 struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
488 u32 *tirn = priv->tirn_inner_vxlan;
491 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
/* all rules below match the inner (encapsulated) headers */
493 mc_enable = MLX5_MATCH_INNER_HEADERS;
494 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
496 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
497 dest.tir_num = tirn[MLX5E_TT_IPV4];
498 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
499 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
500 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
501 if (IS_ERR_OR_NULL(*rule_p))
504 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
505 dest.tir_num = tirn[MLX5E_TT_IPV6];
506 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
507 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
508 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
509 if (IS_ERR_OR_NULL(*rule_p))
/* refine: also match inner IP protocol, starting with UDP */
512 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
513 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_UDP);
515 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
516 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
517 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
518 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
519 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
520 if (IS_ERR_OR_NULL(*rule_p))
523 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
524 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
525 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
526 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
527 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
528 if (IS_ERR_OR_NULL(*rule_p))
/* TCP */
531 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_TCP);
533 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
534 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
535 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
536 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
537 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
538 if (IS_ERR_OR_NULL(*rule_p))
541 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
542 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
543 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
544 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
545 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
546 if (IS_ERR_OR_NULL(*rule_p))
/* IPsec AH */
549 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_AH);
551 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
552 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
553 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
554 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
555 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
556 if (IS_ERR_OR_NULL(*rule_p))
559 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
560 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
561 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
562 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
563 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
564 if (IS_ERR_OR_NULL(*rule_p))
/* IPsec ESP */
567 MLX5_SET(fte_match_param, mv, inner_headers.ip_protocol, IPPROTO_ESP);
569 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
570 dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
571 MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
572 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
573 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
574 if (IS_ERR_OR_NULL(*rule_p))
577 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP];
578 dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
579 MLX5_SET(fte_match_param, mv, inner_headers.ethertype,
581 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
582 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
583 if (IS_ERR_OR_NULL(*rule_p))
/* catch-all: clear the match buffers so this rule matches anything */
587 memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
588 memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
589 rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
590 dest.tir_num = tirn[MLX5E_TT_ANY];
591 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
592 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
593 if (IS_ERR_OR_NULL(*rule_p))
/* error path: tear down every rule created so far */
599 err = PTR_ERR(*rule_p);
601 mlx5e_del_main_vxlan_rules(priv);
/*
 * Allocate scratch match buffers, install the main VxLAN rules via
 * mlx5e_add_main_vxlan_rules_sub(), then free the buffers.
 */
607 mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
613 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
614 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
615 if (match_value == NULL || match_criteria == NULL) {
616 mlx5_en_err(priv->ifp, "alloc failed\n");
618 goto add_main_vxlan_rules_out;
620 err = mlx5e_add_main_vxlan_rules_sub(priv, match_criteria, match_value);
622 add_main_vxlan_rules_out:
623 kvfree(match_criteria);
/*
 * Push the currently active VLAN IDs to the NIC vport context.  If the
 * interface has more VLANs than the device supports
 * (log_max_vlan_list), the list is truncated with a warning.
 * NOTE(review): allocation-failure return and loop body filling
 * `vlans` are missing from this extract.
 */
629 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
631 if_t ifp = priv->ifp;
/* first pass: count active VLANs */
640 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
643 max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
645 if (list_size > max_list_size) {
647 "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
648 list_size, max_list_size);
649 list_size = max_list_size;
652 vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
/* second pass: copy VLAN IDs into the firmware list */
657 for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
663 err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
665 mlx5_en_err(ifp, "Failed to modify vport vlans list err(%d)\n",
/* Categories of rules installed into the VLAN flow table. */
672 enum mlx5e_vlan_rule_type {
673 MLX5E_VLAN_RULE_TYPE_UNTAGGED,
674 MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
675 MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
676 MLX5E_VLAN_RULE_TYPE_MATCH_VID,
/*
 * Install one rule into the VLAN flow table.  All VLAN rules forward
 * to the VxLAN flow table (the next stage in the RX pipeline).  The
 * match criteria depend on the rule type: untagged, any C-tag, any
 * S-tag, or an exact VID match.  For exact-VID rules the vport VLAN
 * list is also refreshed.  NOTE(review): `break` statements between
 * cases are missing from this extract.
 */
680 mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
681 enum mlx5e_vlan_rule_type rule_type, u16 vid,
684 struct mlx5_flow_table *ft = priv->fts.vlan.t;
685 struct mlx5_flow_destination dest = {};
687 struct mlx5_flow_rule **rule_p;
/* VLAN table rules chain into the VxLAN table */
690 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
691 dest.ft = priv->fts.vxlan.t;
693 mc_enable = MLX5_MATCH_OUTER_HEADERS;
696 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
697 rule_p = &priv->vlan.untagged_ft_rule;
/* criteria bit set, value left 0 => cvlan_tag must be absent */
698 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
700 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
701 rule_p = &priv->vlan.any_cvlan_ft_rule;
702 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
703 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
705 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
706 rule_p = &priv->vlan.any_svlan_ft_rule;
707 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
708 MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
710 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
711 rule_p = &priv->vlan.active_vlans_ft_rule[vid];
712 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
713 MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
714 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
715 MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
/* keep the firmware's vport VLAN list in sync */
716 mlx5e_vport_context_update_vlans(priv);
720 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
721 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
722 MLX5_FS_ETH_FLOW_TAG,
725 if (IS_ERR(*rule_p)) {
726 err = PTR_ERR(*rule_p);
728 mlx5_en_err(priv->ifp, "add rule failed\n");
/*
 * Allocate match buffers, delegate to mlx5e_add_vlan_rule_sub(), and
 * free the buffers on all paths.
 */
735 mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
736 enum mlx5e_vlan_rule_type rule_type, u16 vid)
742 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
743 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
744 if (!match_value || !match_criteria) {
745 mlx5_en_err(priv->ifp, "alloc failed\n");
747 goto add_vlan_rule_out;
750 err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
754 kvfree(match_criteria);
/*
 * Remove the VLAN-table rule matching the given rule type (and VID for
 * exact matches); exact-VID removal also refreshes the vport VLAN list.
 */
761 mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
762 enum mlx5e_vlan_rule_type rule_type, u16 vid)
765 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
766 mlx5_del_flow_rule(&priv->vlan.untagged_ft_rule);
768 case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
769 mlx5_del_flow_rule(&priv->vlan.any_cvlan_ft_rule);
771 case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
772 mlx5_del_flow_rule(&priv->vlan.any_svlan_ft_rule);
774 case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
775 mlx5_del_flow_rule(&priv->vlan.active_vlans_ft_rule[vid]);
776 mlx5e_vport_context_update_vlans(priv);
/* Remove both "any VID" rules (C-tag and S-tag). */
784 mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
786 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
787 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
/*
 * Add the "any VID" rules used when VLAN filtering is disabled or the
 * interface is promiscuous; on S-tag failure the C-tag rule is undone.
 */
791 mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
795 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
799 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
801 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
/*
 * Re-enable hardware VLAN filtering: unless the interface is
 * promiscuous, drop the "any VID" pass-through rules so only active
 * VIDs are accepted.  NOTE(review): the promisc early-out body is
 * missing from this extract — confirm it returns without changes.
 */
807 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
809 if (priv->vlan.filter_disabled) {
810 priv->vlan.filter_disabled = false;
811 if (if_getflags(priv->ifp) & IFF_PROMISC)
813 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
814 mlx5e_del_any_vid_rules(priv);
/*
 * Disable hardware VLAN filtering: add "any VID" pass-through rules so
 * all tagged traffic is accepted (skipped when already promiscuous —
 * NOTE(review): the promisc early-out body is missing from this
 * extract; confirm).
 */
819 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
821 if (!priv->vlan.filter_disabled) {
822 priv->vlan.filter_disabled = true;
823 if (if_getflags(priv->ifp) & IFF_PROMISC)
825 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
826 mlx5e_add_any_vid_rules(priv);
/*
 * ifnet VLAN-registration callback: record the VID as active and, if
 * the flow rules are up, install its exact-match rule.  Ignores events
 * for other interfaces sharing the event handler.
 */
831 mlx5e_vlan_rx_add_vid(void *arg, if_t ifp, u16 vid)
833 struct mlx5e_priv *priv = arg;
835 if (ifp != priv->ifp)
/* test_and_set avoids double-installing a rule for a known VID */
839 if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
840 test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
841 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/*
 * ifnet VLAN-unregistration callback: mark the VID inactive and remove
 * its exact-match rule if flow rules are currently installed.
 */
846 mlx5e_vlan_rx_kill_vid(void *arg, if_t ifp, u16 vid)
848 struct mlx5e_priv *priv = arg;
850 if (ifp != priv->ifp)
854 clear_bit(vid, priv->vlan.active_vlans);
855 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
856 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/*
 * (Re)install every VLAN rule: one per active VID (VID 0 is always
 * forced active), the untagged rule, and — if filtering is disabled —
 * the "any VID" rules.  On failure everything added is rolled back via
 * mlx5e_del_all_vlan_rules().
 */
861 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
/* VID 0 (priority-tagged frames) must always be matched */
866 set_bit(0, priv->vlan.active_vlans);
867 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
868 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
874 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
878 if (priv->vlan.filter_disabled) {
879 err = mlx5e_add_any_vid_rules(priv);
885 mlx5e_del_all_vlan_rules(priv);
/*
 * Remove every VLAN rule in reverse order of creation, then clear the
 * forced VID-0 bit so state matches the empty rule set.
 */
890 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
894 if (priv->vlan.filter_disabled)
895 mlx5e_del_any_vid_rules(priv);
897 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
899 for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
900 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
901 clear_bit(0, priv->vlan.active_vlans);
/* Deletion-safe iteration over every node in every bucket of a hash table. */
904 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
905 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
906 LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
/*
 * Apply a node's pending action: ADD installs a full-match steering
 * rule; DEL removes the rules, drops the MPFS (multi-physical-function
 * switch) entry if one was allocated, and unlinks/frees the node.
 */
909 mlx5e_execute_action(struct mlx5e_priv *priv,
910 struct mlx5e_eth_addr_hash_node *hn)
912 switch (hn->action) {
913 case MLX5E_ACTION_ADD:
914 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
915 hn->action = MLX5E_ACTION_NONE;
918 case MLX5E_ACTION_DEL:
919 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
/* -1U marks "no MPFS entry allocated" */
920 if (hn->mpfs_index != -1U)
921 mlx5_mpfs_del_mac(priv->mdev, hn->mpfs_index);
922 mlx5e_del_eth_addr_from_hash(hn);
/*
 * Move the first node from list `fh` to list `uh` and return it.
 * NOTE(review): the LIST_FIRST fetch and NULL check are missing from
 * this extract — presumably returns NULL when `fh` is empty; confirm.
 */
930 static struct mlx5e_eth_addr_hash_node *
931 mlx5e_move_hn(struct mlx5e_eth_addr_hash_head *fh, struct mlx5e_eth_addr_hash_head *uh)
933 struct mlx5e_eth_addr_hash_node *hn;
937 LIST_REMOVE(hn, hlist);
938 LIST_INSERT_HEAD(uh, hn, hlist);
/*
 * Pop the first node off list `fh` and return it.  NOTE(review): the
 * LIST_FIRST fetch, NULL check and return are missing from this
 * extract — presumably returns NULL when the list is empty; confirm.
 */
943 static struct mlx5e_eth_addr_hash_node *
944 mlx5e_remove_hn(struct mlx5e_eth_addr_hash_head *fh)
946 struct mlx5e_eth_addr_hash_node *hn;
950 LIST_REMOVE(hn, hlist);
/*
 * Context for the if_foreach_lladdr/llmaddr callback: `free` is a pool
 * of pre-allocated placeholder nodes, `fill` the destination list.
 * NOTE(review): the `success` flag used by callers is missing from
 * this extract of the struct — confirm against the full source.
 */
954 struct mlx5e_copy_addr_ctx {
955 struct mlx5e_eth_addr_hash_head *free;
956 struct mlx5e_eth_addr_hash_head *fill;
/*
 * Per-address callback for if_foreach_lladdr()/if_foreach_llmaddr():
 * take a placeholder node from the free pool, copy the link-level
 * address into it, and flag failure if the pool ran dry (more
 * addresses than placeholders were allocated).
 */
961 mlx5e_copy_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
963 struct mlx5e_copy_addr_ctx *ctx = arg;
964 struct mlx5e_eth_addr_hash_node *hn;
966 hn = mlx5e_move_hn(ctx->free, ctx->fill);
968 ctx->success = false;
971 ether_addr_copy(hn->ai.addr, LLADDR(sdl));
/*
 * Synchronize the driver's unicast/multicast address hash tables with
 * the interface's current address lists.  Placeholder nodes are
 * pre-allocated (M_WAITOK) before walking the ifnet lists, filled via
 * mlx5e_copy_addr(), then merged into the priv hash tables; unicast
 * addresses newly accepted into the hash also get an MPFS entry.
 * Leftover/unused nodes are freed at the end.  NOTE(review): the
 * retry/cleanup control flow around `ctx.success == false` is missing
 * lines in this extract — it presumably re-runs with a larger pool;
 * confirm against the full source.
 */
977 mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
979 struct mlx5e_copy_addr_ctx ctx;
980 struct mlx5e_eth_addr_hash_head head_free;
981 struct mlx5e_eth_addr_hash_head head_uc;
982 struct mlx5e_eth_addr_hash_head head_mc;
983 struct mlx5e_eth_addr_hash_node *hn;
984 if_t ifp = priv->ifp;
988 PRIV_ASSERT_LOCKED(priv);
991 LIST_INIT(&head_free);
/* one extra slot for the interface's own lladdr */
994 num = 1 + if_lladdr_count(ifp) + if_llmaddr_count(ifp);
996 /* allocate place holders */
997 for (x = 0; x != num; x++) {
998 hn = malloc(sizeof(*hn), M_MLX5EN, M_WAITOK | M_ZERO);
999 hn->action = MLX5E_ACTION_ADD;
1000 hn->mpfs_index = -1U;
1001 LIST_INSERT_HEAD(&head_free, hn, hlist);
/* seed the unicast list with the primary MAC */
1004 hn = mlx5e_move_hn(&head_free, &head_uc);
1007 ether_addr_copy(hn->ai.addr, if_getlladdr(ifp));
1009 ctx.free = &head_free;
1010 ctx.fill = &head_uc;
1012 if_foreach_lladdr(ifp, mlx5e_copy_addr, &ctx);
1013 if (ctx.success == false)
1016 ctx.fill = &head_mc;
1017 if_foreach_llmaddr(ifp, mlx5e_copy_addr, &ctx);
1018 if (ctx.success == false)
1021 /* insert L2 unicast addresses into hash list */
1023 while ((hn = mlx5e_remove_hn(&head_uc)) != NULL) {
1024 if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc, hn) == 0)
/* new unicast entry: register it with the MPFS switch table */
1026 if (hn->mpfs_index == -1U)
1027 mlx5_mpfs_add_mac(priv->mdev, &hn->mpfs_index,
1031 /* insert L2 multicast addresses into hash list */
1033 while ((hn = mlx5e_remove_hn(&head_mc)) != NULL) {
1034 if (mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc, hn) == 0)
/* free everything not consumed by the hash tables */
1039 while ((hn = mlx5e_remove_hn(&head_uc)) != NULL)
1041 while ((hn = mlx5e_remove_hn(&head_mc)) != NULL)
1043 while ((hn = mlx5e_remove_hn(&head_free)) != NULL)
1046 if (ctx.success == false)
/*
 * Flatten the UC or MC address hash table into a contiguous MAC array
 * for the firmware.  For UC the interface's own address is pushed
 * first; for MC the broadcast address leads when broadcast is enabled.
 * The interface's own lladdr is skipped during the walk (already
 * placed).  NOTE(review): the size-bound check inside the loop is
 * missing from this extract.
 */
1050 static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
1051 u8 addr_array[][ETH_ALEN], int size)
1053 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1054 if_t ifp = priv->ifp;
1055 struct mlx5e_eth_addr_hash_node *hn;
1056 struct mlx5e_eth_addr_hash_head *addr_list;
1057 struct mlx5e_eth_addr_hash_node *tmp;
1061 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1063 if (is_uc) /* Make sure our own address is pushed first */
1064 ether_addr_copy(addr_array[i++], if_getlladdr(ifp));
1065 else if (priv->eth_addr.broadcast_enabled)
1066 ether_addr_copy(addr_array[i++], if_getbroadcastaddr(ifp));
1068 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
1069 if (ether_addr_equal(if_getlladdr(ifp), hn->ai.addr))
1073 ether_addr_copy(addr_array[i++], hn->ai.addr);
/*
 * Push the driver's UC or MC address list into the NIC vport context.
 * The list is truncated (with a warning) to the device capability
 * (log_max_current_uc_list / log_max_current_mc_list).  For MC, one
 * extra slot is reserved up front for the broadcast address.
 * NOTE(review): allocation-failure handling for addr_array is missing
 * lines in this extract.
 */
1077 static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
1080 bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
1081 struct mlx5e_eth_addr_hash_node *hn;
1082 u8 (*addr_array)[ETH_ALEN] = NULL;
1083 struct mlx5e_eth_addr_hash_head *addr_list;
1084 struct mlx5e_eth_addr_hash_node *tmp;
/* MC list starts at 1 when broadcast must be included */
1090 size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
1092 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
1093 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
1095 addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
1096 mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
1099 if (size > max_size) {
1100 mlx5_en_err(priv->ifp,
1101 "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
1102 is_uc ? "UC" : "MC", size, max_size);
1107 addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
1112 mlx5e_fill_addr_array(priv, list_type, addr_array, size);
1115 err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
1118 mlx5_en_err(priv->ifp,
1119 "Failed to modify vport %s list err(%d)\n",
1120 is_uc ? "UC" : "MC", err);
/*
 * Push all vport-context state to firmware: UC list, MC list, and the
 * promiscuous/allmulti flags.
 */
1124 static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
1126 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1128 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
1129 mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
1130 mlx5_modify_nic_vport_promisc(priv->mdev, 0,
1131 ea->allmulti_enabled,
1132 ea->promisc_enabled);
/*
 * Execute the pending ADD/DEL action of every node in both the
 * unicast and multicast hash tables (deletion-safe walk, since DEL
 * actions unlink and free nodes).
 */
1136 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
1138 struct mlx5e_eth_addr_hash_node *hn;
1139 struct mlx5e_eth_addr_hash_node *tmp;
1142 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1143 mlx5e_execute_action(priv, hn);
1145 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1146 mlx5e_execute_action(priv, hn);
/*
 * Mark-and-sweep resync of the address tables: mark every existing
 * entry for deletion, re-sync against the ifnet lists (which flips
 * still-present entries back to NONE/ADD), then execute the resulting
 * actions.  NOTE(review): how `rx_mode_enable` gates the sync step is
 * not visible in this extract — confirm in the full source.
 */
1150 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv, bool rx_mode_enable)
1152 struct mlx5e_eth_addr_hash_node *hn;
1153 struct mlx5e_eth_addr_hash_node *tmp;
1156 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
1157 hn->action = MLX5E_ACTION_DEL;
1158 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
1159 hn->action = MLX5E_ACTION_DEL;
1162 mlx5e_sync_ifp_addr(priv);
1164 mlx5e_apply_ifp_addr(priv);
/*
 * Recompute the RX filtering mode from the interface flags and apply
 * only the deltas: enable new promisc/allmulti/broadcast state before
 * the address resync, disable stale state after it, then push the
 * result to the vport context.  When rx_mode_enable is false all
 * modes are treated as off (used for teardown).
 */
1168 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv, bool rx_mode_enable)
1170 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
1171 if_t ndev = priv->ifp;
1172 int ndev_flags = if_getflags(ndev);
/* desired state, derived from ifnet flags */
1174 bool promisc_enabled = rx_mode_enable && (ndev_flags & IFF_PROMISC);
1175 bool allmulti_enabled = rx_mode_enable && (ndev_flags & IFF_ALLMULTI);
1176 bool broadcast_enabled = rx_mode_enable;
/* deltas vs. the currently-programmed state */
1178 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
1179 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
1180 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
1181 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
1182 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
1183 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
1185 /* update broadcast address */
1186 ether_addr_copy(priv->eth_addr.broadcast.addr,
1187 if_getbroadcastaddr(priv->ifp));
1189 if (enable_promisc) {
1190 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
/* promisc must also bypass VLAN filtering */
1191 if (!priv->vlan.filter_disabled)
1192 mlx5e_add_any_vid_rules(priv);
1194 if (enable_allmulti)
1195 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
1196 if (enable_broadcast)
1197 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
1199 mlx5e_handle_ifp_addr(priv, rx_mode_enable);
1201 if (disable_broadcast)
1202 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
1203 if (disable_allmulti)
1204 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
1205 if (disable_promisc) {
1206 if (!priv->vlan.filter_disabled)
1207 mlx5e_del_any_vid_rules(priv);
1208 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
/* record the newly-programmed state */
1211 ea->promisc_enabled = promisc_enabled;
1212 ea->allmulti_enabled = allmulti_enabled;
1213 ea->broadcast_enabled = broadcast_enabled;
1215 mlx5e_vport_context_update(priv);
/*
 * Deferred-work entry point: re-apply the RX mode, but only once the
 * flow rules have been created (MLX5E_STATE_FLOW_RULES_READY).
 */
1219 mlx5e_set_rx_mode_work(struct work_struct *work)
1221 struct mlx5e_priv *priv =
1222 container_of(work, struct mlx5e_priv, set_rx_mode_work);
1225 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1226 mlx5e_set_rx_mode_core(priv, true);
/*
 * Destroy a table's flow groups in reverse creation order, skipping
 * slots that were never successfully created.
 */
1231 mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
1235 for (i = ft->num_groups - 1; i >= 0; i--) {
1236 if (!IS_ERR_OR_NULL(ft->g[i]))
1237 mlx5_destroy_flow_group(ft->g[i]);
/* Destroy the groups first, then the flow table itself. */
1244 mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
1246 mlx5e_destroy_groups(ft);
1248 mlx5_destroy_flow_table(ft->t);
/*
 * Main flow-table layout: group sizes per match-criteria combination.
 * Group 4 (BIT(14)) dominates — presumably the DMAC full-match group;
 * confirm against mlx5e_create_main_groups_sub() in the full source.
 */
1252 #define MLX5E_NUM_MAIN_GROUPS 10
1253 #define MLX5E_MAIN_GROUP0_SIZE BIT(4)
1254 #define MLX5E_MAIN_GROUP1_SIZE BIT(3)
1255 #define MLX5E_MAIN_GROUP2_SIZE BIT(1)
1256 #define MLX5E_MAIN_GROUP3_SIZE BIT(0)
1257 #define MLX5E_MAIN_GROUP4_SIZE BIT(14)
1258 #define MLX5E_MAIN_GROUP5_SIZE BIT(13)
1259 #define MLX5E_MAIN_GROUP6_SIZE BIT(11)
1260 #define MLX5E_MAIN_GROUP7_SIZE BIT(2)
1261 #define MLX5E_MAIN_GROUP8_SIZE BIT(1)
1262 #define MLX5E_MAIN_GROUP9_SIZE BIT(0)
/* total table size = sum of all group sizes */
1263 #define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\
1264 MLX5E_MAIN_GROUP1_SIZE +\
1265 MLX5E_MAIN_GROUP2_SIZE +\
1266 MLX5E_MAIN_GROUP3_SIZE +\
1267 MLX5E_MAIN_GROUP4_SIZE +\
1268 MLX5E_MAIN_GROUP5_SIZE +\
1269 MLX5E_MAIN_GROUP6_SIZE +\
1270 MLX5E_MAIN_GROUP7_SIZE +\
1271 MLX5E_MAIN_GROUP8_SIZE +\
1272 MLX5E_MAIN_GROUP9_SIZE +\
/*
 * Create the ten flow groups of the "main" RX table inside @ft.
 * The same command buffer @in is reset (memset) and reused for each
 * group; @ix accumulates the running start index so the groups tile
 * the table contiguously.  On any failure the ERR_PTR is converted to
 * an errno and all groups created so far are destroyed.
 */
1276 mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
/* mc: match-criteria area of the FW command; dmac: its DMAC field. */
1279 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
1280 u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
1281 match_criteria.outer_headers.dmac_47_16);
1285 /* Tunnel rules need to be first in this list of groups */
1287 /* Start tunnel rules */
/* Group 0: ethertype + IP protocol + UDP destination port (tunnels). */
1288 memset(in, 0, inlen);
1289 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1290 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1291 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1292 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1293 MLX5_SET_CFG(in, start_flow_index, ix);
1294 ix += MLX5E_MAIN_GROUP0_SIZE;
1295 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1296 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1297 if (IS_ERR(ft->g[ft->num_groups]))
1298 goto err_destory_groups;
1300 /* End Tunnel Rules */
/* Group 1: ethertype + IP protocol. */
1302 memset(in, 0, inlen);
1303 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1304 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1305 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1306 MLX5_SET_CFG(in, start_flow_index, ix);
1307 ix += MLX5E_MAIN_GROUP1_SIZE;
1308 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1309 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1310 if (IS_ERR(ft->g[ft->num_groups]))
1311 goto err_destory_groups;
/* Group 2: ethertype only. */
1314 memset(in, 0, inlen);
1315 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1316 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1317 MLX5_SET_CFG(in, start_flow_index, ix);
1318 ix += MLX5E_MAIN_GROUP2_SIZE;
1319 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1320 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1321 if (IS_ERR(ft->g[ft->num_groups]))
1322 goto err_destory_groups;
/* Group 3: catch-all, no match criteria. */
1325 memset(in, 0, inlen);
1326 MLX5_SET_CFG(in, start_flow_index, ix);
1327 ix += MLX5E_MAIN_GROUP3_SIZE;
1328 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1329 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1330 if (IS_ERR(ft->g[ft->num_groups]))
1331 goto err_destory_groups;
/* Group 4: full DMAC + ethertype + IP protocol. */
1334 memset(in, 0, inlen);
1335 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1336 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1337 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1338 memset(dmac, 0xff, ETH_ALEN);
1339 MLX5_SET_CFG(in, start_flow_index, ix);
1340 ix += MLX5E_MAIN_GROUP4_SIZE;
1341 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1342 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1343 if (IS_ERR(ft->g[ft->num_groups]))
1344 goto err_destory_groups;
/* Group 5: full DMAC + ethertype. */
1347 memset(in, 0, inlen);
1348 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1349 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1350 memset(dmac, 0xff, ETH_ALEN);
1351 MLX5_SET_CFG(in, start_flow_index, ix);
1352 ix += MLX5E_MAIN_GROUP5_SIZE;
1353 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1354 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1355 if (IS_ERR(ft->g[ft->num_groups]))
1356 goto err_destory_groups;
/* Group 6: full DMAC only. */
1359 memset(in, 0, inlen);
1360 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1361 memset(dmac, 0xff, ETH_ALEN);
1362 MLX5_SET_CFG(in, start_flow_index, ix);
1363 ix += MLX5E_MAIN_GROUP6_SIZE;
1364 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1365 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1366 if (IS_ERR(ft->g[ft->num_groups]))
1367 goto err_destory_groups;
/*
 * Group 7: ethertype + IP protocol again — presumably the multicast
 * tier (a DMAC multicast-bit set on a line not shown in this view);
 * TODO(review): confirm against the full source.
 */
1370 memset(in, 0, inlen);
1371 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1372 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1373 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1375 MLX5_SET_CFG(in, start_flow_index, ix);
1376 ix += MLX5E_MAIN_GROUP7_SIZE;
1377 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1378 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1379 if (IS_ERR(ft->g[ft->num_groups]))
1380 goto err_destory_groups;
/* Group 8: ethertype only (looser tier of group 7). */
1383 memset(in, 0, inlen);
1384 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1385 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1387 MLX5_SET_CFG(in, start_flow_index, ix);
1388 ix += MLX5E_MAIN_GROUP8_SIZE;
1389 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1390 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1391 if (IS_ERR(ft->g[ft->num_groups]))
1392 goto err_destory_groups;
/* Group 9: final tier, outer-header criteria enabled but none set. */
1395 memset(in, 0, inlen);
1396 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1398 MLX5_SET_CFG(in, start_flow_index, ix);
1399 ix += MLX5E_MAIN_GROUP9_SIZE;
1400 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1401 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1402 if (IS_ERR(ft->g[ft->num_groups]))
1403 goto err_destory_groups;
/* Error unwind: extract errno, clear the failed slot, destroy the rest. */
1409 err = PTR_ERR(ft->g[ft->num_groups]);
1410 ft->g[ft->num_groups] = NULL;
1411 mlx5e_destroy_groups(ft);
/*
 * Allocate the create_flow_group_in command buffer and delegate the
 * actual group creation to mlx5e_create_main_groups_sub().
 */
1417 mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
1420 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1423 in = mlx5_vzalloc(inlen);
1427 err = mlx5e_create_main_groups_sub(ft, in, inlen);
/*
 * Group sizes for the "vxlan_main" table: three tiers matching the
 * decapsulated inner headers (see mlx5e_create_main_vxlan_groups_sub).
 */
1433 #define MLX5E_MAIN_VXLAN_GROUP0_SIZE BIT(3)
1434 #define MLX5E_MAIN_VXLAN_GROUP1_SIZE BIT(3)
1435 #define MLX5E_MAIN_VXLAN_GROUP2_SIZE BIT(0)
/*
 * Create the three flow groups of the "vxlan_main" table, matching on
 * INNER headers (post-decapsulation), most specific first.  Same
 * reuse-the-command-buffer / running-index pattern as the main table.
 */
1437 mlx5e_create_main_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1440 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
/* Group 0: inner ethertype + inner IP protocol. */
1444 memset(in, 0, inlen);
1445 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1446 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1447 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
1448 MLX5_SET_CFG(in, start_flow_index, ix);
1449 ix += MLX5E_MAIN_VXLAN_GROUP0_SIZE;
1450 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1451 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1452 if (IS_ERR(ft->g[ft->num_groups]))
1453 goto err_destory_groups;
/* Group 1: inner ethertype only. */
1456 memset(in, 0, inlen);
1457 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
1458 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
1459 MLX5_SET_CFG(in, start_flow_index, ix);
1460 ix += MLX5E_MAIN_VXLAN_GROUP1_SIZE;
1461 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1462 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1463 if (IS_ERR(ft->g[ft->num_groups]))
1464 goto err_destory_groups;
/* Group 2: catch-all, no match criteria. */
1467 memset(in, 0, inlen);
1468 MLX5_SET_CFG(in, start_flow_index, ix);
1469 ix += MLX5E_MAIN_VXLAN_GROUP2_SIZE;
1470 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1471 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1472 if (IS_ERR(ft->g[ft->num_groups]))
1473 goto err_destory_groups;
/* Error unwind: extract errno, clear the failed slot, destroy the rest. */
1479 err = PTR_ERR(ft->g[ft->num_groups]);
1480 ft->g[ft->num_groups] = NULL;
1481 mlx5e_destroy_groups(ft);
/*
 * Allocate the FW command buffer and create the "vxlan_main" table's
 * flow groups via mlx5e_create_main_vxlan_groups_sub().
 */
1487 mlx5e_create_main_vxlan_groups(struct mlx5e_flow_table *ft)
1490 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1493 in = mlx5_vzalloc(inlen);
1497 err = mlx5e_create_main_vxlan_groups_sub(ft, in, inlen);
/*
 * Create either the "main" or the "vxlan_main" RX flow table (chosen
 * by @inner_vxlan) plus its flow groups.  Both variants use the same
 * MLX5E_MAIN_TABLE_SIZE and the same MLX5E_NUM_MAIN_GROUPS-entry
 * group array; only the group-creation routine differs.
 */
1505 mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
1507 struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
1512 ft->t = mlx5_create_flow_table(priv->fts.ns, 0,
1513 inner_vxlan ? "vxlan_main" : "main", MLX5E_MAIN_TABLE_SIZE);
1515 if (IS_ERR(ft->t)) {
1516 err = PTR_ERR(ft->t);
/* Group-pointer array; freed/unwound on any later failure. */
1520 ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1523 goto err_destroy_main_flow_table;
1526 err = inner_vxlan ? mlx5e_create_main_vxlan_groups(ft) :
1527 mlx5e_create_main_groups(ft);
1535 err_destroy_main_flow_table:
1536 mlx5_destroy_flow_table(ft->t);
/* Destroy the "main" RX flow table and its groups. */
1542 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
1544 mlx5e_destroy_flow_table(&priv->fts.main);
/* Destroy the "vxlan_main" (inner-header) RX flow table and its groups. */
1547 static void mlx5e_destroy_main_vxlan_flow_table(struct mlx5e_priv *priv)
1549 mlx5e_destroy_flow_table(&priv->fts.main_vxlan);
/*
 * VLAN table layout: group 0 matches C-TAG + VID (per-VLAN rules),
 * group 1 matches any C-TAG, group 2 matches any S-TAG.
 */
1552 #define MLX5E_NUM_VLAN_GROUPS 3
1553 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
1554 #define MLX5E_VLAN_GROUP1_SIZE BIT(1)
1555 #define MLX5E_VLAN_GROUP2_SIZE BIT(0)
1556 #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
1557 MLX5E_VLAN_GROUP1_SIZE +\
1558 MLX5E_VLAN_GROUP2_SIZE +\
/*
 * Create the three flow groups of the VLAN table, most specific first.
 * The command buffer @in is reset and reused per group; @ix tracks the
 * running flow index.
 */
1562 mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1567 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
/* Group 0: C-TAG present + specific first VID. */
1569 memset(in, 0, inlen);
1570 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1571 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1572 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
1573 MLX5_SET_CFG(in, start_flow_index, ix);
1574 ix += MLX5E_VLAN_GROUP0_SIZE;
1575 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1576 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1577 if (IS_ERR(ft->g[ft->num_groups]))
1578 goto err_destory_groups;
/* Group 1: any C-TAG (VID wildcard). */
1581 memset(in, 0, inlen);
1582 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1583 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
1584 MLX5_SET_CFG(in, start_flow_index, ix);
1585 ix += MLX5E_VLAN_GROUP1_SIZE;
1586 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1587 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1588 if (IS_ERR(ft->g[ft->num_groups]))
1589 goto err_destory_groups;
/* Group 2: any S-TAG (service VLAN). */
1592 memset(in, 0, inlen);
1593 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1594 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
1595 MLX5_SET_CFG(in, start_flow_index, ix);
1596 ix += MLX5E_VLAN_GROUP2_SIZE;
1597 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1598 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1599 if (IS_ERR(ft->g[ft->num_groups]))
1600 goto err_destory_groups;
/* Error unwind: extract errno, clear the failed slot, destroy the rest. */
1606 err = PTR_ERR(ft->g[ft->num_groups]);
1607 ft->g[ft->num_groups] = NULL;
1608 mlx5e_destroy_groups(ft);
/*
 * Allocate the FW command buffer and create the VLAN table's flow
 * groups via mlx5e_create_vlan_groups_sub().
 */
1614 mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
1617 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1620 in = mlx5_vzalloc(inlen);
1624 err = mlx5e_create_vlan_groups_sub(ft, in, inlen);
/*
 * Create the VLAN RX flow table (the first table packets hit, see the
 * diagram at the top of this file), its group array, and its groups.
 */
1631 mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
1633 struct mlx5e_flow_table *ft = &priv->fts.vlan;
1637 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
1638 MLX5E_VLAN_TABLE_SIZE);
1640 if (IS_ERR(ft->t)) {
1641 err = PTR_ERR(ft->t);
1645 ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
1648 goto err_destroy_vlan_flow_table;
1651 err = mlx5e_create_vlan_groups(ft);
1660 err_destroy_vlan_flow_table:
1661 mlx5_destroy_flow_table(ft->t);
/* Destroy the VLAN RX flow table and its groups. */
1668 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
1670 mlx5e_destroy_flow_table(&priv->fts.vlan);
/*
 * Install one VXLAN steering rule into the "vxlan" table: match the
 * outer ethertype, IPPROTO_UDP and the VXLAN UDP destination port from
 * @el, forwarding hits to the inner-header "vxlan_main" table.  The
 * created rule handle is stored in el->vxlan_ft_rule.
 */
1674 mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
1675 struct mlx5e_vxlan_db_el *el)
1677 struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1678 struct mlx5_flow_destination dest = {};
1680 struct mlx5_flow_rule **rule_p;
/* Matching traffic is forwarded to the inner-header main table. */
1683 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1684 dest.ft = priv->fts.main_vxlan.t;
1686 mc_enable = MLX5_MATCH_OUTER_HEADERS;
1687 rule_p = &el->vxlan_ft_rule;
1688 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1689 MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
1690 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1691 MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
1692 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1693 MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
1695 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1696 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
1698 if (IS_ERR(*rule_p)) {
1699 err = PTR_ERR(*rule_p);
1701 mlx5_en_err(priv->ifp, "add rule failed\n");
/*
 * Linear lookup of a VXLAN database entry by ethertype and UDP port.
 * Returns the entry, or falls through when no match exists (return of
 * the not-found case is on a line outside this view).
 */
1707 static struct mlx5e_vxlan_db_el *
1708 mlx5e_vxlan_find_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1710 struct mlx5e_vxlan_db_el *el;
1712 TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1713 if (el->proto == proto && el->port == port)
/*
 * Allocate and zero-initialize a VXLAN database entry; the flow-rule
 * handle starts out NULL until the rule is actually installed.
 */
1719 static struct mlx5e_vxlan_db_el *
1720 mlx5e_vxlan_alloc_db_el(struct mlx5e_priv *priv, u_int proto, u_int port)
1722 struct mlx5e_vxlan_db_el *el;
1724 el = mlx5_vzalloc(sizeof(*el));
1728 el->vxlan_ft_rule = NULL;
/*
 * Map a socket address family to the corresponding Ethernet
 * ethertype (AF_INET -> ETHERTYPE_IP, AF_INET6 -> ETHERTYPE_IPV6).
 */
1733 mlx5e_vxlan_family_to_proto(sa_family_t family, u_int *proto)
1737 *proto = ETHERTYPE_IP;
1740 *proto = ETHERTYPE_IPV6;
/*
 * Install the steering rule for one VXLAN database entry.  Allocates
 * scratch match-value/criteria buffers, delegates to
 * mlx5e_add_vxlan_rule_sub(), and always frees the scratch buffers
 * (kvfree(NULL) is a no-op for the partial-allocation failure path).
 */
1748 mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
1749 struct mlx5e_vxlan_db_el *el)
1751 u32 *match_criteria;
1755 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1756 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1757 if (match_value == NULL || match_criteria == NULL) {
1758 mlx5_en_err(priv->ifp, "alloc failed\n");
1760 goto add_vxlan_rule_out;
1763 err = mlx5e_add_vxlan_rule_sub(priv, match_criteria, match_value, el);
1766 kvfree(match_criteria);
1767 kvfree(match_value);
/*
 * Register a VXLAN (family, port) pair: reuse an existing database
 * entry when present, otherwise allocate one, install the steering
 * rule when VXLAN HW checksum offload is enabled, and queue the entry
 * on the per-priv VXLAN list.
 */
1773 mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1775 struct mlx5e_vxlan_db_el *el;
1779 err = mlx5e_vxlan_family_to_proto(family, &proto);
1783 el = mlx5e_vxlan_find_db_el(priv, proto, port);
1789 el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
/* Only install the rule when the offload capability is enabled. */
1791 if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
1792 err = mlx5e_add_vxlan_rule_from_db(priv, el);
1794 el->installed = true;
1797 TAILQ_INSERT_TAIL(&priv->vxlan.head, el, link);
/*
 * Install the VXLAN table's catch-all rule: non-VXLAN traffic that
 * matched no per-port rule is forwarded on to the regular "main"
 * table.  NOTE(review): mc_enable's initialization is on a line not
 * shown here — presumably 0 (no criteria) for a catch-all; confirm.
 */
1805 mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
1807 struct mlx5_flow_table *ft = priv->fts.vxlan.t;
1808 struct mlx5_flow_destination dest = {};
1810 struct mlx5_flow_rule **rule_p;
1813 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1814 dest.ft = priv->fts.main.t;
1816 rule_p = &priv->fts.vxlan_catchall_ft_rule;
1817 *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
1818 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_ETH_FLOW_TAG, &dest);
1820 if (IS_ERR(*rule_p)) {
1821 err = PTR_ERR(*rule_p);
1823 mlx5_en_err(priv->ifp, "add rule failed\n");
/*
 * Wrapper that allocates the scratch match buffers, installs the
 * VXLAN catch-all rule via mlx5e_add_vxlan_catchall_rule_sub(), and
 * frees the buffers on all paths.
 */
1831 mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
1833 u32 *match_criteria;
1837 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1838 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
1839 if (match_value == NULL || match_criteria == NULL) {
1840 mlx5_en_err(priv->ifp, "alloc failed\n");
1842 goto add_vxlan_rule_out;
1845 err = mlx5e_add_vxlan_catchall_rule_sub(priv, match_criteria,
1849 kvfree(match_criteria);
1850 kvfree(match_value);
/*
 * (Re)install steering rules for every VXLAN entry in the database,
 * marking each entry installed on success.  Used when flow rules are
 * brought up after entries were registered.
 */
1856 mlx5e_add_all_vxlan_rules(struct mlx5e_priv *priv)
1858 struct mlx5e_vxlan_db_el *el;
1862 TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1865 err = mlx5e_add_vxlan_rule_from_db(priv, el);
1868 el->installed = true;
/*
 * Drop one reference on the VXLAN (family, port) entry; when this was
 * the last reference, remove the steering rule and unlink the entry
 * from the database.
 */
1875 mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
1877 struct mlx5e_vxlan_db_el *el;
1881 err = mlx5e_vxlan_family_to_proto(family, &proto);
1885 el = mlx5e_vxlan_find_db_el(priv, proto, port);
/* Entry still referenced elsewhere: just decrement (elided lines). */
1888 if (el->refcount > 1) {
1894 mlx5_del_flow_rule(&el->vxlan_ft_rule);
1895 TAILQ_REMOVE(&priv->vxlan.head, el, link);
/*
 * Remove the steering rules of all VXLAN database entries but keep
 * the entries themselves, clearing their installed flag so the rules
 * can be re-added later by mlx5e_add_all_vxlan_rules().
 */
1901 mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
1903 struct mlx5e_vxlan_db_el *el;
1905 TAILQ_FOREACH(el, &priv->vxlan.head, link) {
1908 mlx5_del_flow_rule(&el->vxlan_ft_rule);
1909 el->installed = false;
/* Remove the VXLAN table's catch-all (forward-to-main) rule. */
1914 mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
1916 mlx5_del_flow_rule(&priv->fts.vxlan_catchall_ft_rule);
/*
 * if_vxlan start event handler: register the UDP port with firmware,
 * then install the steering rule if the flow rules are already up.
 * The rule-add error is intentionally not propagated (event context).
 */
1920 mlx5e_vxlan_start(void *arg, if_t ifp __unused, sa_family_t family,
1923 struct mlx5e_priv *priv = arg;
1927 err = mlx5_vxlan_udp_port_add(priv->mdev, port);
1928 if (err == 0 && test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1929 mlx5e_add_vxlan_rule(priv, family, port);
/*
 * if_vxlan stop event handler: mirror of mlx5e_vxlan_start() — remove
 * the steering rule (if rules are up) and always deregister the UDP
 * port from firmware, ignoring the deletion result.
 */
1934 mlx5e_vxlan_stop(void *arg, if_t ifp __unused, sa_family_t family,
1937 struct mlx5e_priv *priv = arg;
1940 if (test_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state))
1941 mlx5e_del_vxlan_rule(priv, family, port);
1942 (void)mlx5_vxlan_udp_port_delete(priv->mdev, port);
/*
 * VXLAN table layout: group 0 holds the per-port match rules,
 * group 1 holds the single catch-all rule.
 */
1946 #define MLX5E_VXLAN_GROUP0_SIZE BIT(3) /* XXXKIB */
1947 #define MLX5E_VXLAN_GROUP1_SIZE BIT(0)
1948 #define MLX5E_NUM_VXLAN_GROUPS BIT(1)
1949 #define MLX5E_VXLAN_TABLE_SIZE \
1950 (MLX5E_VXLAN_GROUP0_SIZE + MLX5E_VXLAN_GROUP1_SIZE)
/*
 * Create the two flow groups of the VXLAN table: one matching outer
 * ethertype + IP protocol + UDP dport (per-port VXLAN rules), and a
 * criteria-less catch-all group.
 */
1953 mlx5e_create_vxlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
1958 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
/* Group 0: outer ethertype + IP protocol + UDP destination port. */
1960 memset(in, 0, inlen);
1961 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1962 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
1963 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
1964 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
1965 MLX5_SET_CFG(in, start_flow_index, ix);
1966 ix += MLX5E_VXLAN_GROUP0_SIZE;
1967 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1968 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1969 if (IS_ERR(ft->g[ft->num_groups]))
1970 goto err_destory_groups;
/* Group 1: catch-all, no match criteria. */
1973 memset(in, 0, inlen);
1974 MLX5_SET_CFG(in, start_flow_index, ix);
1975 ix += MLX5E_VXLAN_GROUP1_SIZE;
1976 MLX5_SET_CFG(in, end_flow_index, ix - 1);
1977 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
1978 if (IS_ERR(ft->g[ft->num_groups]))
1979 goto err_destory_groups;
/* Error unwind: extract errno, clear the failed slot, destroy the rest. */
1985 err = PTR_ERR(ft->g[ft->num_groups]);
1986 ft->g[ft->num_groups] = NULL;
1987 mlx5e_destroy_groups(ft);
/*
 * Allocate the FW command buffer and create the VXLAN table's flow
 * groups via mlx5e_create_vxlan_groups_sub().
 */
1993 mlx5e_create_vxlan_groups(struct mlx5e_flow_table *ft)
1996 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1999 in = mlx5_vzalloc(inlen);
2003 err = mlx5e_create_vxlan_groups_sub(ft, in, inlen);
/*
 * Create the "vxlan" RX flow table, its group array and groups, and
 * initialize the per-priv VXLAN port database list.
 */
2010 mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
2012 struct mlx5e_flow_table *ft = &priv->fts.vxlan;
2016 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vxlan",
2017 MLX5E_VXLAN_TABLE_SIZE);
2019 if (IS_ERR(ft->t)) {
2020 err = PTR_ERR(ft->t);
2024 ft->g = kcalloc(MLX5E_NUM_VXLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
2027 goto err_destroy_vxlan_flow_table;
2030 err = mlx5e_create_vxlan_groups(ft);
/* Database of registered VXLAN (proto, port) entries. */
2034 TAILQ_INIT(&priv->vxlan.head);
2040 err_destroy_vxlan_flow_table:
2041 mlx5_destroy_flow_table(ft->t);
/*
 * Inner-RSS table layout: three inner-header match tiers (ethertype +
 * IP protocol, ethertype only, catch-all).
 */
2047 #define MLX5E_NUM_INNER_RSS_GROUPS 3
2048 #define MLX5E_INNER_RSS_GROUP0_SIZE BIT(3)
2049 #define MLX5E_INNER_RSS_GROUP1_SIZE BIT(1)
2050 #define MLX5E_INNER_RSS_GROUP2_SIZE BIT(0)
2051 #define MLX5E_INNER_RSS_TABLE_SIZE (MLX5E_INNER_RSS_GROUP0_SIZE +\
2052 MLX5E_INNER_RSS_GROUP1_SIZE +\
2053 MLX5E_INNER_RSS_GROUP2_SIZE +\
/*
 * Create the three flow groups of the inner-RSS table, matching on
 * INNER headers, most specific first.  Same command-buffer reuse and
 * running-index pattern as the other *_groups_sub() routines.
 */
2057 mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
2060 u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
/* Group 0: inner ethertype + inner IP protocol. */
2064 memset(in, 0, inlen);
2065 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2066 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2067 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
2068 MLX5_SET_CFG(in, start_flow_index, ix);
2069 ix += MLX5E_INNER_RSS_GROUP0_SIZE;
2070 MLX5_SET_CFG(in, end_flow_index, ix - 1);
2071 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2072 if (IS_ERR(ft->g[ft->num_groups]))
2073 goto err_destory_groups;
/* Group 1: inner ethertype only. */
2076 memset(in, 0, inlen);
2077 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
2078 MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
2079 MLX5_SET_CFG(in, start_flow_index, ix);
2080 ix += MLX5E_INNER_RSS_GROUP1_SIZE;
2081 MLX5_SET_CFG(in, end_flow_index, ix - 1);
2082 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2083 if (IS_ERR(ft->g[ft->num_groups]))
2084 goto err_destory_groups;
/* Group 2: catch-all, no match criteria. */
2087 memset(in, 0, inlen);
2088 MLX5_SET_CFG(in, start_flow_index, ix);
2089 ix += MLX5E_INNER_RSS_GROUP2_SIZE;
2090 MLX5_SET_CFG(in, end_flow_index, ix - 1);
2091 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
2092 if (IS_ERR(ft->g[ft->num_groups]))
2093 goto err_destory_groups;
/* Error unwind: extract errno, clear the failed slot, destroy the rest. */
2099 err = PTR_ERR(ft->g[ft->num_groups]);
2100 ft->g[ft->num_groups] = NULL;
2101 mlx5e_destroy_groups(ft);
/*
 * Allocate the FW command buffer and create the inner-RSS table's
 * flow groups via mlx5e_create_inner_rss_groups_sub().
 */
2107 mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
2110 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2113 in = mlx5_vzalloc(inlen);
2117 err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);
/*
 * Create the "inner_rss" RX flow table, its group array and groups.
 * Follows the same create/unwind shape as the other table creators.
 */
2124 mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
2126 struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
2130 ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
2131 MLX5E_INNER_RSS_TABLE_SIZE);
2133 if (IS_ERR(ft->t)) {
2134 err = PTR_ERR(ft->t);
2138 ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
2142 goto err_destroy_inner_rss_flow_table;
2145 err = mlx5e_create_inner_rss_groups(ft);
2154 err_destroy_inner_rss_flow_table:
2155 mlx5_destroy_flow_table(ft->t);
/* Destroy the inner-RSS RX flow table and its groups. */
2161 static void mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
2163 mlx5e_destroy_flow_table(&priv->fts.inner_rss);
/* Destroy the VXLAN RX flow table and its groups. */
2167 mlx5e_destroy_vxlan_flow_table(struct mlx5e_priv *priv)
2169 mlx5e_destroy_flow_table(&priv->fts.vxlan);
/*
 * Create all RX flow tables in dependency order: vlan -> vxlan ->
 * main_vxlan -> inner_rss -> main, then the VXLAN catch-all rule and
 * the accel TCP (TLS RX) tables.  The goto labels unwind the exact
 * reverse of that order on failure; note "true"/"false" in the label
 * names refer to the inner_vxlan argument of
 * mlx5e_create_main_flow_table().
 */
2173 mlx5e_open_flow_tables(struct mlx5e_priv *priv)
2177 /* setup namespace pointer */
2178 priv->fts.ns = mlx5_get_flow_namespace(
2179 priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
2181 err = mlx5e_create_vlan_flow_table(priv);
2185 err = mlx5e_create_vxlan_flow_table(priv);
2187 goto err_destroy_vlan_flow_table;
2189 err = mlx5e_create_main_flow_table(priv, true);
2191 goto err_destroy_vxlan_flow_table;
2193 err = mlx5e_create_inner_rss_flow_table(priv);
2195 goto err_destroy_main_flow_table_true;
2197 err = mlx5e_create_main_flow_table(priv, false);
2199 goto err_destroy_inner_rss_flow_table;
2201 err = mlx5e_add_vxlan_catchall_rule(priv);
2203 goto err_destroy_main_flow_table_false;
2205 err = mlx5e_accel_fs_tcp_create(priv);
2207 goto err_del_vxlan_catchall_rule;
/* Error unwind, strictly in reverse creation order. */
2211 err_del_vxlan_catchall_rule:
2212 mlx5e_del_vxlan_catchall_rule(priv);
2213 err_destroy_main_flow_table_false:
2214 mlx5e_destroy_main_flow_table(priv);
2215 err_destroy_inner_rss_flow_table:
2216 mlx5e_destroy_inner_rss_flow_table(priv);
2217 err_destroy_main_flow_table_true:
2218 mlx5e_destroy_main_vxlan_flow_table(priv);
2219 err_destroy_vxlan_flow_table:
2220 mlx5e_destroy_vxlan_flow_table(priv);
2221 err_destroy_vlan_flow_table:
2222 mlx5e_destroy_vlan_flow_table(priv);
/*
 * Tear down everything mlx5e_open_flow_tables() created, in exact
 * reverse creation order (mirrors that function's error unwind).
 */
2228 mlx5e_close_flow_tables(struct mlx5e_priv *priv)
2230 mlx5e_accel_fs_tcp_destroy(priv);
2231 mlx5e_del_vxlan_catchall_rule(priv);
2232 mlx5e_destroy_main_flow_table(priv);
2233 mlx5e_destroy_inner_rss_flow_table(priv);
2234 mlx5e_destroy_main_vxlan_flow_table(priv);
2235 mlx5e_destroy_vxlan_flow_table(priv);
2236 mlx5e_destroy_vlan_flow_table(priv);
/*
 * Populate the flow tables with rules: VLAN rules, main-vxlan rules,
 * then per-port VXLAN rules, followed by a full RX-mode refresh.
 * Sets MLX5E_STATE_FLOW_RULES_READY last, which gates the rx-mode
 * work and the VXLAN start/stop event handlers above.
 */
2240 mlx5e_open_flow_rules(struct mlx5e_priv *priv)
2244 err = mlx5e_add_all_vlan_rules(priv);
2248 err = mlx5e_add_main_vxlan_rules(priv);
2250 goto err_del_all_vlan_rules;
2252 err = mlx5e_add_all_vxlan_rules(priv);
2254 goto err_del_main_vxlan_rules;
2256 mlx5e_set_rx_mode_core(priv, true);
2258 set_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
/* Error unwind in reverse creation order. */
2262 err_del_main_vxlan_rules:
2263 mlx5e_del_main_vxlan_rules(priv);
2265 err_del_all_vlan_rules:
2266 mlx5e_del_all_vlan_rules(priv);
2272 mlx5e_close_flow_rules(struct mlx5e_priv *priv)
2274 clear_bit(MLX5E_STATE_FLOW_RULES_READY, &priv->state);
2276 mlx5e_set_rx_mode_core(priv, false);
2277 mlx5e_del_all_vxlan_rules(priv);
2278 mlx5e_del_main_vxlan_rules(priv);
2279 mlx5e_del_all_vlan_rules(priv);