2 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <linux/list.h>
31 #include <dev/mlx5/flow_table.h>
/*
 * Per-MAC-address bookkeeping: pending-action values and the hash node
 * used to track unicast/multicast addresses programmed into the flow
 * table.  NOTE(review): the enum header and closing delimiters are not
 * visible in this excerpt -- verify against the full source.
 */
47 MLX5E_ACTION_NONE = 0,
52 struct mlx5e_eth_addr_hash_node {
/* Linkage in one bucket of the address hash table. */
53 LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
/* MAC address plus the flow-table entry indices installed for it. */
55 struct mlx5e_eth_addr_info ai;
/*
 * Map an Ethernet MAC address to a bucket index of the address hash
 * table.  NOTE(review): return type and body are not visible in this
 * excerpt -- confirm the hashing scheme in the full source.
 */
59 mlx5e_hash_eth_addr(const u8 * addr)
/*
 * Record "addr" in the given address hash.  If the address is already
 * present and marked MLX5E_ACTION_DEL, revive it by resetting the
 * action to MLX5E_ACTION_NONE; otherwise allocate a zeroed node
 * (M_NOWAIT, so allocation may fail) marked MLX5E_ACTION_ADD and link
 * it at the head of its bucket.  NOTE(review): interior lines (early
 * return and the malloc NULL check) are missing from this excerpt.
 */
65 mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
68 struct mlx5e_eth_addr_hash_node *hn;
69 int ix = mlx5e_hash_eth_addr(addr);
/* Already tracked?  Just cancel any pending delete. */
71 LIST_FOREACH(hn, &hash[ix], hlist) {
72 if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
73 if (hn->action == MLX5E_ACTION_DEL)
74 hn->action = MLX5E_ACTION_NONE;
/* New address: allocate a node and queue it for addition. */
79 hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
83 ether_addr_copy(hn->ai.addr, addr);
84 hn->action = MLX5E_ACTION_ADD;
86 LIST_INSERT_HEAD(&hash[ix], hn, hlist);
/*
 * Unlink a node from its address-hash bucket.  NOTE(review): the
 * matching free() of the node is not visible in this excerpt.
 */
90 mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
92 LIST_REMOVE(hn, hlist);
/*
 * Tear down every main-flow-table entry previously installed for "ai":
 * one entry per traffic type whose bit is set in ai->tt_vec (TCP/UDP
 * over IPv4/IPv6, plain IPv4/IPv6, and the catch-all MLX5E_TT_ANY).
 * The saved entry index in ai->ft_ix[] identifies each rule.
 */
97 mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
98 struct mlx5e_eth_addr_info *ai)
100 void *ft = priv->ft.main;
102 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
103 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
105 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
106 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
108 if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
109 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
111 if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
112 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
114 if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
115 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
117 if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
118 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
120 if (ai->tt_vec & (1 << MLX5E_TT_ANY))
121 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
/*
 * Classify a MAC address: non-multicast (unicast), IPv4 multicast
 * (first octet 0x01 -- presumably the 01:00:5e prefix; the remaining
 * octet checks are not visible here), IPv6 multicast (0x33 -- the
 * 33:33 prefix), or other multicast.
 */
125 mlx5e_get_eth_addr_type(const u8 * addr)
127 if (ETHER_IS_MULTICAST(addr) == 0)
130 if ((addr[0] == 0x01) &&
134 return (MLX5E_MC_IPV4);
136 if ((addr[0] == 0x33) &&
138 return (MLX5E_MC_IPV6);
140 return (MLX5E_MC_OTHER);
/*
 * Build the bitmask of traffic types (MLX5E_TT_*) that need a flow
 * rule for address "ai" under the given rule type.  FULLMATCH entries
 * get a mask according to the address class (unicast -> all types,
 * IPv4 multicast -> IPv4 types, IPv6 multicast -> IPv6 types, other
 * multicast -> TT_ANY); the default (PROMISC) case enables everything.
 * NOTE(review): the inner switch labels and assignment statements are
 * partially missing from this excerpt.
 */
144 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
150 case MLX5E_FULLMATCH:
151 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
152 switch (eth_addr_type) {
/* Unicast: steer every traffic type. */
155 (1 << MLX5E_TT_IPV4_TCP) |
156 (1 << MLX5E_TT_IPV6_TCP) |
157 (1 << MLX5E_TT_IPV4_UDP) |
158 (1 << MLX5E_TT_IPV6_UDP) |
159 (1 << MLX5E_TT_IPV4) |
160 (1 << MLX5E_TT_IPV6) |
161 (1 << MLX5E_TT_ANY) |
/* IPv4 multicast: UDP/IPv4 only (no TCP over multicast). */
167 (1 << MLX5E_TT_IPV4_UDP) |
168 (1 << MLX5E_TT_IPV4) |
/* IPv6 multicast: UDP/IPv6 only. */
174 (1 << MLX5E_TT_IPV6_UDP) |
175 (1 << MLX5E_TT_IPV6) |
/* Other multicast: catch-all entry only. */
181 (1 << MLX5E_TT_ANY) |
/* Allmulti: all multicast-capable traffic types. */
189 (1 << MLX5E_TT_IPV4_UDP) |
190 (1 << MLX5E_TT_IPV6_UDP) |
191 (1 << MLX5E_TT_IPV4) |
192 (1 << MLX5E_TT_IPV6) |
193 (1 << MLX5E_TT_ANY) |
197 default: /* MLX5E_PROMISC */
199 (1 << MLX5E_TT_IPV4_TCP) |
200 (1 << MLX5E_TT_IPV6_TCP) |
201 (1 << MLX5E_TT_IPV4_UDP) |
202 (1 << MLX5E_TT_IPV6_UDP) |
203 (1 << MLX5E_TT_IPV4) |
204 (1 << MLX5E_TT_IPV6) |
205 (1 << MLX5E_TT_ANY) |
/*
 * Install the flow-table entries for address "ai" into the main table.
 * The caller supplies pre-allocated flow_context and match_criteria
 * buffers; this routine fills in the match value (DMAC and, per
 * traffic type, ethertype / ip_protocol), sets the destination to the
 * TIR of the matching traffic type, and adds one entry per bit in the
 * tt_vec returned by mlx5e_get_tt_vec().  On any insertion failure the
 * entries installed so far are rolled back via
 * mlx5e_del_eth_addr_from_flow_table().  NOTE(review): switch/brace
 * lines and the error-check lines around each insertion are missing
 * from this excerpt.
 */
214 mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
215 struct mlx5e_eth_addr_info *ai, int type,
216 void *flow_context, void *match_criteria)
218 u8 match_criteria_enable = 0;
222 u8 *match_criteria_dmac;
223 void *ft = priv->ft.main;
224 u32 *tirn = priv->tirn;
/* Locate the sub-fields inside the flow context / match criteria. */
228 match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
229 dmac = MLX5_ADDR_OF(fte_match_param, match_value,
230 outer_headers.dmac_47_16);
231 match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
232 outer_headers.dmac_47_16);
233 dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
/* Every entry forwards to exactly one TIR destination. */
235 MLX5_SET(flow_context, flow_context, action,
236 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
237 MLX5_SET(flow_context, flow_context, destination_list_size, 1);
238 MLX5_SET(dest_format_struct, dest, destination_type,
239 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
/* FULLMATCH: match the exact MAC; allmulti: match the group bit. */
242 case MLX5E_FULLMATCH:
243 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
244 memset(match_criteria_dmac, 0xff, ETH_ALEN);
245 ether_addr_copy(dmac, ai->addr);
249 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
250 match_criteria_dmac[0] = 0x01;
260 tt_vec = mlx5e_get_tt_vec(ai, type);
/* Catch-all entry: no ethertype/protocol match. */
262 if (tt_vec & (1 << MLX5E_TT_ANY)) {
263 MLX5_SET(dest_format_struct, dest, destination_id,
265 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
266 match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_ANY]);
268 mlx5e_del_eth_addr_from_flow_table(priv, ai);
271 ai->tt_vec |= (1 << MLX5E_TT_ANY);
/* From here on, also match on ethertype. */
274 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
275 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
276 outer_headers.ethertype);
278 if (tt_vec & (1 << MLX5E_TT_IPV4)) {
279 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
281 MLX5_SET(dest_format_struct, dest, destination_id,
282 tirn[MLX5E_TT_IPV4]);
283 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
284 match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4]);
286 mlx5e_del_eth_addr_from_flow_table(priv, ai);
289 ai->tt_vec |= (1 << MLX5E_TT_IPV4);
292 if (tt_vec & (1 << MLX5E_TT_IPV6)) {
293 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
295 MLX5_SET(dest_format_struct, dest, destination_id,
296 tirn[MLX5E_TT_IPV6]);
297 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
298 match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6]);
300 mlx5e_del_eth_addr_from_flow_table(priv, ai);
303 ai->tt_vec |= (1 << MLX5E_TT_IPV6);
/* Narrow further to an IP protocol (UDP first, then TCP below). */
305 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
306 outer_headers.ip_protocol);
307 MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
310 if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
311 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
313 MLX5_SET(dest_format_struct, dest, destination_id,
314 tirn[MLX5E_TT_IPV4_UDP]);
315 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
316 match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
318 mlx5e_del_eth_addr_from_flow_table(priv, ai);
321 ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
323 if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
324 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
326 MLX5_SET(dest_format_struct, dest, destination_id,
327 tirn[MLX5E_TT_IPV6_UDP]);
328 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
329 match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
331 mlx5e_del_eth_addr_from_flow_table(priv, ai);
334 ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
/* Switch the matched protocol value to TCP. */
336 MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
339 if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
340 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
342 MLX5_SET(dest_format_struct, dest, destination_id,
343 tirn[MLX5E_TT_IPV4_TCP]);
344 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
345 match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
347 mlx5e_del_eth_addr_from_flow_table(priv, ai);
350 ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
352 if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
353 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
355 MLX5_SET(dest_format_struct, dest, destination_id,
356 tirn[MLX5E_TT_IPV6_TCP]);
357 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
358 match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
360 mlx5e_del_eth_addr_from_flow_table(priv, ai);
363 ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
/*
 * Wrapper around mlx5e_add_eth_addr_rule_sub(): allocate the zeroed
 * flow-context (+ one destination slot) and match-criteria buffers,
 * run the insertion, log on failure, and free the buffers on all paths
 * via the shared label (kvfree(NULL) is a no-op, so a partial
 * allocation failure is handled too).
 */
369 mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
370 struct mlx5e_eth_addr_info *ai, int type)
376 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
377 MLX5_ST_SZ_BYTES(dest_format_struct));
378 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
379 if (!flow_context || !match_criteria) {
380 if_printf(priv->ifp, "%s: alloc failed\n", __func__);
382 goto add_eth_addr_rule_out;
385 err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, flow_context,
388 if_printf(priv->ifp, "%s: failed\n", __func__);
390 add_eth_addr_rule_out:
391 kvfree(match_criteria);
392 kvfree(flow_context);
/*
 * Kinds of VLAN steering rules: untagged traffic, any VLAN id
 * (used while the VLAN filter is disabled), or one specific VID.
 */
396 enum mlx5e_vlan_rule_type {
397 MLX5E_VLAN_RULE_TYPE_UNTAGGED,
398 MLX5E_VLAN_RULE_TYPE_ANY_VID,
399 MLX5E_VLAN_RULE_TYPE_MATCH_VID,
/*
 * Install one rule in the VLAN flow table that forwards matching
 * traffic onward to the main flow table.  Depending on rule_type the
 * rule matches untagged frames, any tagged frame, or one specific VID;
 * the resulting entry index is stored in the corresponding priv->vlan
 * slot so mlx5e_del_vlan_rule() can remove it later.  Buffers are
 * freed on every path.  NOTE(review): break/error-check lines are
 * missing from this excerpt.
 */
403 mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
404 enum mlx5e_vlan_rule_type rule_type, u16 vid)
406 u8 match_criteria_enable = 0;
414 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
415 MLX5_ST_SZ_BYTES(dest_format_struct));
416 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
417 if (!flow_context || !match_criteria) {
418 if_printf(priv->ifp, "%s: alloc failed\n", __func__);
420 goto add_vlan_rule_out;
422 match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
423 dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
/* All VLAN rules forward to the main flow table. */
425 MLX5_SET(flow_context, flow_context, action,
426 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
427 MLX5_SET(flow_context, flow_context, destination_list_size, 1);
428 MLX5_SET(dest_format_struct, dest, destination_type,
429 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
430 MLX5_SET(dest_format_struct, dest, destination_id,
431 mlx5_get_flow_table_id(priv->ft.main));
/* Always discriminate on the vlan_tag bit of the outer header. */
433 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
434 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
435 outer_headers.vlan_tag);
438 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
439 ft_ix = &priv->vlan.untagged_rule_ft_ix;
441 case MLX5E_VLAN_RULE_TYPE_ANY_VID:
442 ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
443 MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
446 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
447 ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
448 MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
/* Exact-VID rules also match on first_vid. */
450 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
451 outer_headers.first_vid);
452 MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
457 err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
458 match_criteria, flow_context, ft_ix);
460 if_printf(priv->ifp, "%s: failed\n", __func__);
463 kvfree(match_criteria);
464 kvfree(flow_context);
/*
 * Remove a previously installed VLAN flow-table rule, identified by
 * rule_type (and vid for the exact-match case) via the entry index
 * saved by mlx5e_add_vlan_rule().
 */
469 mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
470 enum mlx5e_vlan_rule_type rule_type, u16 vid)
473 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
474 mlx5_del_flow_table_entry(priv->ft.vlan,
475 priv->vlan.untagged_rule_ft_ix);
477 case MLX5E_VLAN_RULE_TYPE_ANY_VID:
478 mlx5_del_flow_table_entry(priv->ft.vlan,
479 priv->vlan.any_vlan_rule_ft_ix);
481 case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
482 mlx5_del_flow_table_entry(priv->ft.vlan,
483 priv->vlan.active_vlans_ft_ix[vid]);
/*
 * Re-enable VLAN filtering: clear the filter_disabled flag and, if the
 * interface is up, remove the "accept any VID" rule so only explicitly
 * added VIDs pass.
 */
489 mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
491 if (priv->vlan.filter_disabled) {
492 priv->vlan.filter_disabled = false;
493 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
494 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
/*
 * Disable VLAN filtering: set the filter_disabled flag and, if the
 * interface is up, install the "accept any VID" rule so all tagged
 * traffic passes.
 */
500 mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
502 if (!priv->vlan.filter_disabled) {
503 priv->vlan.filter_disabled = true;
504 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
505 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
/*
 * ifnet VLAN-registration callback: record the VID as active and, if
 * the interface is up, install its exact-match steering rule.  Events
 * for other interfaces are ignored.
 */
511 mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
513 struct mlx5e_priv *priv = arg;
515 if (ifp != priv->ifp)
519 set_bit(vid, priv->vlan.active_vlans);
520 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
521 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/*
 * ifnet VLAN-unregistration callback: mark the VID inactive and, if
 * the interface is up, remove its exact-match steering rule.
 */
526 mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
528 struct mlx5e_priv *priv = arg;
530 if (ifp != priv->ifp)
534 clear_bit(vid, priv->vlan.active_vlans);
535 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
536 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/*
 * (Re)install the full VLAN rule set: one exact-match rule per active
 * VID, the untagged rule, and -- if filtering is disabled -- the
 * any-VID rule.  NOTE(review): the error-bailout lines between steps
 * are missing from this excerpt.
 */
541 mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
546 for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
547 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
553 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
557 if (priv->vlan.filter_disabled) {
558 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
/*
 * Tear down the full VLAN rule set, mirroring
 * mlx5e_add_all_vlan_rules(): any-VID rule (if filtering is disabled),
 * the untagged rule, then every active per-VID rule.
 */
567 mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
571 if (priv->vlan.filter_disabled)
572 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
574 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
576 for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
577 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/*
 * Iterate over every node of every bucket of an address hash table.
 * Uses LIST_FOREACH_SAFE, so the current node may be removed while
 * iterating (needed by mlx5e_execute_action's delete path).
 */
580 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
581 for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
582 LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
/*
 * Apply a hash node's pending action: ADD installs a FULLMATCH flow
 * rule and marks the node done; DEL removes its flow rules and unlinks
 * the node from the hash.
 */
585 mlx5e_execute_action(struct mlx5e_priv *priv,
586 struct mlx5e_eth_addr_hash_node *hn)
588 switch (hn->action) {
589 case MLX5E_ACTION_ADD:
590 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
591 hn->action = MLX5E_ACTION_NONE;
594 case MLX5E_ACTION_DEL:
595 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
596 mlx5e_del_eth_addr_from_hash(hn);
/*
 * Refresh the unicast and multicast address hashes from the ifnet:
 * the interface's own link-level address, every AF_LINK address on the
 * address list, and every AF_LINK multicast membership.  Entries found
 * here are marked/kept for addition; anything not re-added stays
 * marked MLX5E_ACTION_DEL by the caller.  NOTE(review): the matching
 * read-lock acquisitions for the unlock calls are not visible in this
 * excerpt.
 */
605 mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
607 struct ifnet *ifp = priv->ifp;
609 struct ifmultiaddr *ifma;
611 /* XXX adding this entry might not be needed */
612 mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
613 LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));
/* Unicast addresses. */
616 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
617 if (ifa->ifa_addr->sa_family != AF_LINK)
619 mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
620 LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
622 if_addr_runlock(ifp);
/* Multicast memberships. */
625 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
626 if (ifma->ifma_addr->sa_family != AF_LINK)
628 mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
629 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
631 if_maddr_runlock(ifp);
/*
 * Execute the pending action of every node in both the unicast and
 * multicast address hashes (safe iteration: DEL removes nodes).
 */
635 mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
637 struct mlx5e_eth_addr_hash_node *hn;
638 struct mlx5e_eth_addr_hash_node *tmp;
641 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
642 mlx5e_execute_action(priv, hn);
644 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
645 mlx5e_execute_action(priv, hn);
/*
 * Mark-and-sweep resynchronization of hardware address filters with
 * the ifnet: mark every tracked address for deletion, re-mark the ones
 * still present on the interface (only while it is up), then apply the
 * resulting add/none/del actions.
 */
649 mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
651 struct mlx5e_eth_addr_hash_node *hn;
652 struct mlx5e_eth_addr_hash_node *tmp;
655 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
656 hn->action = MLX5E_ACTION_DEL;
657 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
658 hn->action = MLX5E_ACTION_DEL;
660 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
661 mlx5e_sync_ifp_addr(priv);
663 mlx5e_apply_ifp_addr(priv);
/*
 * Reconcile promiscuous / allmulti / broadcast steering rules with the
 * interface flags.  For each mode, compute the desired state (only
 * meaningful while the interface is open), derive enable/disable
 * transitions against the cached state in eth_addr, add new rules
 * before the per-address resync and remove stale ones after it, then
 * cache the new state.
 */
667 mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
669 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
670 struct ifnet *ndev = priv->ifp;
672 bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
673 bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
674 bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
675 bool broadcast_enabled = rx_mode_enable;
/* Edge-detect each mode against the cached state. */
677 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
678 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
679 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
680 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
681 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
682 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
684 /* update broadcast address */
685 ether_addr_copy(priv->eth_addr.broadcast.addr,
686 priv->ifp->if_broadcastaddr);
689 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
691 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
692 if (enable_broadcast)
693 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
/* Resync the per-address unicast/multicast filters. */
695 mlx5e_handle_ifp_addr(priv);
697 if (disable_broadcast)
698 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
699 if (disable_allmulti)
700 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
702 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
704 ea->promisc_enabled = promisc_enabled;
705 ea->allmulti_enabled = allmulti_enabled;
706 ea->broadcast_enabled = broadcast_enabled;
/*
 * Deferred-work entry point: run the rx-mode reconciliation, but only
 * while the interface is open.  NOTE(review): the state-lock
 * acquire/release around the call is not visible in this excerpt.
 */
710 mlx5e_set_rx_mode_work(struct work_struct *work)
712 struct mlx5e_priv *priv =
713 container_of(work, struct mlx5e_priv, set_rx_mode_work);
716 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
717 mlx5e_set_rx_mode_core(priv);
/*
 * Create the main (level-1) receive flow table with nine match groups,
 * ordered most- to least-specific: groups 0-2 match without DMAC
 * (ethertype+ip_protocol / ethertype / none), groups 3-5 add an exact
 * DMAC, and groups 6-8 appear to repeat that for the multicast-bit
 * DMAC mask (the dmac mask writes for 6-8 are not visible in this
 * excerpt -- verify).  Returns 0 on success, -ENOMEM on failure.
 * NOTE(review): the free(g) call after table creation is also outside
 * this view.
 */
722 mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
724 struct mlx5_flow_table_group *g;
727 g = malloc(9 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
/* Group 0: ethertype + ip_protocol (TCP/UDP over IPv4/IPv6). */
732 g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
733 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
734 outer_headers.ethertype);
735 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
736 outer_headers.ip_protocol);
/* Group 1: ethertype only (plain IPv4/IPv6). */
739 g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
740 MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
741 outer_headers.ethertype);
/* Group 3: exact DMAC + ethertype + ip_protocol. */
746 g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
747 dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
748 outer_headers.dmac_47_16);
749 memset(dmac, 0xff, ETH_ALEN);
750 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
751 outer_headers.ethertype);
752 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
753 outer_headers.ip_protocol);
/* Group 4: exact DMAC + ethertype. */
756 g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
757 dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
758 outer_headers.dmac_47_16);
759 memset(dmac, 0xff, ETH_ALEN);
760 MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
761 outer_headers.ethertype);
/* Group 5: exact DMAC only. */
764 g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
765 dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
766 outer_headers.dmac_47_16);
767 memset(dmac, 0xff, ETH_ALEN);
/* Groups 6-8: DMAC-masked variants (mask writes not visible here). */
770 g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
771 dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
772 outer_headers.dmac_47_16);
774 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
775 outer_headers.ethertype);
776 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
777 outer_headers.ip_protocol);
780 g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
781 dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
782 outer_headers.dmac_47_16);
784 MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
785 outer_headers.ethertype);
788 g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
789 dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
790 outer_headers.dmac_47_16);
792 priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
793 MLX5_FLOW_TABLE_TYPE_NIC_RCV,
797 return (priv->ft.main ? 0 : -ENOMEM);
/* Destroy the main flow table and clear the cached handle. */
801 mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
803 mlx5_destroy_flow_table(priv->ft.main);
804 priv->ft.main = NULL;
/*
 * Create the VLAN (level-0) receive flow table with two match groups:
 * group 0 matches vlan_tag + first_vid (exact-VID rules), group 1
 * matches vlan_tag only (untagged / any-VID rules).  Returns 0 on
 * success, -ENOMEM on failure.  NOTE(review): the free(g) call after
 * table creation is outside this view.
 */
808 mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
810 struct mlx5_flow_table_group *g;
812 g = malloc(2 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
/* Group 0: vlan_tag + first_vid. */
817 g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
818 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
819 outer_headers.vlan_tag);
820 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
821 outer_headers.first_vid);
823 /* untagged + any vlan id */
825 g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
826 MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
827 outer_headers.vlan_tag);
829 priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
830 MLX5_FLOW_TABLE_TYPE_NIC_RCV,
834 return (priv->ft.vlan ? 0 : -ENOMEM);
/* Destroy the VLAN flow table and clear the cached handle. */
838 mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
840 mlx5_destroy_flow_table(priv->ft.vlan);
841 priv->ft.vlan = NULL;
/*
 * Create both receive flow tables (main first, then VLAN, which
 * forwards into main).  On VLAN-table failure the main table is torn
 * down again before returning the error.
 */
845 mlx5e_open_flow_table(struct mlx5e_priv *priv)
849 err = mlx5e_create_main_flow_table(priv);
853 err = mlx5e_create_vlan_flow_table(priv);
855 goto err_destroy_main_flow_table;
859 err_destroy_main_flow_table:
860 mlx5e_destroy_main_flow_table(priv);
/*
 * Destroy both flow tables in reverse creation order (VLAN before
 * main, since VLAN rules forward into the main table).
 */
866 mlx5e_close_flow_table(struct mlx5e_priv *priv)
868 mlx5e_destroy_vlan_flow_table(priv);
869 mlx5e_destroy_main_flow_table(priv);