2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <linux/if_ether.h>
29 #include <linux/etherdevice.h>
30 #include <dev/mlx5/driver.h>
31 #include <dev/mlx5/flow_table.h>
32 #include <dev/mlx5/eswitch_vacl.h>
33 #include "mlx5_core.h"
/*
 * Flow-group indices within the per-vport ACL flow table, one group per
 * rule class.  Used below as offsets into the group array passed to
 * mlx5_create_flow_table() (shifted down by one when the loopback group
 * is absent on ingress ACLs).
 * NOTE(review): the enclosing "enum { ... };" lines are not visible in
 * this excerpt.
 */
36 MLX5_ACL_LOOPBACK_GROUP_IDX = 0,
37 MLX5_ACL_UNTAGGED_GROUP_IDX = 1,
38 MLX5_ACL_VLAN_GROUP_IDX = 2,
39 MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX = 3,
40 MLX5_ACL_DEFAULT_GROUP_IDX = 4,
/*
 * Per-vport VLAN ACL state: the backing flow table, the fixed rules
 * currently applied to it, and the VLAN-filter bitmap plus per-VID flow
 * handles.
 * NOTE(review): members referenced by the code below (e.g. ft, acl_type,
 * max_ft_size, vport, smac) are not visible in this excerpt — lines are
 * missing.
 */
50 struct mlx5_vacl_table {
51 struct mlx5_core_dev *dev;		/* owning mlx5 core device */
57 struct mlx_vacl_fr loopback_fr;	/* loopback-block rule state */
58 struct mlx_vacl_fr untagged_fr;	/* untagged-traffic rule state */
59 struct mlx_vacl_fr unknown_vlan_fr;	/* unknown-VLAN rule state */
60 struct mlx_vacl_fr default_fr;	/* catch-all rule (spoofchk only) */
62 bool vlan_filter_enabled;	/* filter requested by caller */
63 bool vlan_filter_applied;	/* filter rules currently installed */
64 unsigned long *vlan_allowed_bitmap;	/* 4096-bit set of allowed VIDs */
65 u32 vlan_fi_table[4096];	/* flow-entry handle per allowed VID */
67 bool spoofchk_enabled;	/* match source MAC in all rules */
/*
 * Install an ALLOW flow entry for a single VLAN id in the vport ACL table.
 * The rule matches vlan_tag + first_vid (and the vport source MAC when
 * spoof checking is enabled) and stores the resulting flow index in
 * vlan_fi_table[vlan].  The VID must already be set in
 * vlan_allowed_bitmap.  Returns 0 on success or an error from the
 * flow-table API.
 * NOTE(review): lines are missing from this excerpt (early-return on the
 * bitmap test, allocation-failure paths, the flow_context free, and the
 * final return are not visible).
 */
71 static int mlx5_vacl_table_allow_vlan(void *acl_t, u16 vlan)
73 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
74 u32 *flow_context = NULL;
75 void *in_match_criteria = NULL;
76 void *in_match_value = NULL;
78 int vlan_mc_enable = MLX5_MATCH_OUTER_HEADERS;
81 if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
84 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
90 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
91 if (!in_match_criteria) {
/* Allow packets whose outer header carries exactly this VID. */
97 MLX5_SET(flow_context, flow_context, action,
98 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
99 in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
100 MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 1);
101 MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
103 MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
104 MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
/* With spoofchk, additionally pin the rule to the vport's MAC. */
106 if (acl_table->spoofchk_enabled) {
107 smac = MLX5_ADDR_OF(fte_match_param,
109 outer_headers.smac_47_16);
110 ether_addr_copy(smac, acl_table->smac);
111 smac = MLX5_ADDR_OF(fte_match_param,
113 outer_headers.smac_47_16);
114 memset(smac, 0xff, ETH_ALEN);
116 err = mlx5_add_flow_table_entry(acl_table->ft, vlan_mc_enable,
117 in_match_criteria, flow_context,
118 &acl_table->vlan_fi_table[vlan]);
122 if (in_match_criteria)
123 vfree(in_match_criteria);
/*
 * (Re)install the loopback rule: match misc_parameters.source_port ==
 * this vport so traffic looped back from the same vport gets new_action
 * (typically DROP).  Any previously applied loopback rule is deleted
 * first; on success the rule state (applied/action/fi) is recorded.
 * NOTE(review): lines are missing from this excerpt (allocation-failure
 * paths, misc_parameters MLX5_ADDR_OF second arguments, frees, return).
 */
127 static int mlx5_vacl_table_apply_loopback_filter(void *acl_t, u16 new_action)
129 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
130 u8 loopback_mc_enable = MLX5_MATCH_MISC_PARAMETERS;
131 u32 *flow_context = NULL;
132 void *in_match_criteria = NULL;
133 void *in_match_value = NULL;
134 void *mv_misc = NULL;
135 void *mc_misc = NULL;
138 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
144 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
145 if (!in_match_criteria) {
/* Drop the stale rule before installing the replacement. */
150 if (acl_table->loopback_fr.applied)
151 mlx5_del_flow_table_entry(acl_table->ft,
152 acl_table->loopback_fr.fi);
154 /* Apply new loopback rule */
155 MLX5_SET(flow_context, flow_context, action, new_action);
156 in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
157 mv_misc = MLX5_ADDR_OF(fte_match_param, in_match_value,
159 mc_misc = MLX5_ADDR_OF(fte_match_param, in_match_criteria,
161 MLX5_SET(fte_match_set_misc, mv_misc, source_port, acl_table->vport);
163 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
165 err = mlx5_add_flow_table_entry(acl_table->ft, loopback_mc_enable,
166 in_match_criteria, flow_context,
167 &acl_table->loopback_fr.fi);
/* Track whether the rule is now live and which action it carries. */
169 acl_table->loopback_fr.applied = false;
171 acl_table->loopback_fr.applied = true;
172 acl_table->loopback_fr.action = new_action;
178 if (in_match_criteria)
179 vfree(in_match_criteria);
/*
 * (Re)install the catch-all default rule (empty match criteria) with
 * new_action.  Only meaningful when spoof checking is enabled — without
 * spoofchk the function bails out early (early-return line not visible
 * here).  Replaces any previously applied default rule.
 * NOTE(review): lines are missing from this excerpt (error paths, frees,
 * return).
 */
183 static int mlx5_vacl_table_apply_default(void *acl_t, u16 new_action)
185 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
186 u8 default_mc_enable = 0;	/* match nothing: catch-all entry */
187 u32 *flow_context = NULL;
188 void *in_match_criteria = NULL;
/* Default rule only exists to backstop the spoofchk match rules. */
191 if (!acl_table->spoofchk_enabled)
194 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
200 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
201 if (!in_match_criteria) {
206 if (acl_table->default_fr.applied)
207 mlx5_del_flow_table_entry(acl_table->ft,
208 acl_table->default_fr.fi)
210 /* Apply new default rule */
211 MLX5_SET(flow_context, flow_context, action, new_action);
212 err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
213 in_match_criteria, flow_context,
214 &acl_table->default_fr.fi);
216 acl_table->default_fr.applied = false;
218 acl_table->default_fr.applied = true;
219 acl_table->default_fr.action = new_action;
225 if (in_match_criteria)
226 vfree(in_match_criteria);
/*
 * (Re)install the rule for untagged traffic: match vlan_tag == 0 (plus
 * the vport source MAC when spoof checking is on) and apply new_action
 * (ALLOW or DROP).  Replaces any previously applied untagged rule and
 * records the new state.
 * NOTE(review): lines are missing from this excerpt (smac declaration,
 * error paths, frees, return).
 */
230 static int mlx5_vacl_table_apply_untagged(void *acl_t, u16 new_action)
232 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
233 u8 untagged_mc_enable = MLX5_MATCH_OUTER_HEADERS;
235 u32 *flow_context = NULL;
236 void *in_match_criteria = NULL;
237 void *in_match_value = NULL;
240 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
246 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
247 if (!in_match_criteria) {
252 if (acl_table->untagged_fr.applied)
253 mlx5_del_flow_table_entry(acl_table->ft,
254 acl_table->untagged_fr.fi);
256 /* Apply new untagged rule */
257 MLX5_SET(flow_context, flow_context, action, new_action);
258 in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
/* Criteria bit set, value bit clear => match only untagged frames. */
259 MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 0);
260 MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
261 if (acl_table->spoofchk_enabled) {
262 smac = MLX5_ADDR_OF(fte_match_param,
264 outer_headers.smac_47_16);
265 ether_addr_copy(smac, acl_table->smac);
266 smac = MLX5_ADDR_OF(fte_match_param,
268 outer_headers.smac_47_16);
269 memset(smac, 0xff, ETH_ALEN);
271 err = mlx5_add_flow_table_entry(acl_table->ft, untagged_mc_enable,
272 in_match_criteria, flow_context,
273 &acl_table->untagged_fr.fi);
275 acl_table->untagged_fr.applied = false;
277 acl_table->untagged_fr.applied = true;
278 acl_table->untagged_fr.action = new_action;
284 if (in_match_criteria)
285 vfree(in_match_criteria);
/*
 * (Re)install the rule for VLANs not covered by the allowed-VID entries.
 * Without spoofchk it is a pure catch-all (no match criteria); with
 * spoofchk it additionally matches the vport source MAC.  new_action is
 * ALLOW or DROP.  Replaces any previously applied unknown-VLAN rule.
 * NOTE(review): lines are missing from this excerpt (smac declaration,
 * error paths, frees, return).
 */
289 static int mlx5_vacl_table_apply_unknown_vlan(void *acl_t, u16 new_action)
291 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
/* Match outer headers only when we must also check the source MAC. */
292 u8 default_mc_enable = (!acl_table->spoofchk_enabled) ? 0 :
293 MLX5_MATCH_OUTER_HEADERS;
294 u32 *flow_context = NULL;
295 void *in_match_criteria = NULL;
296 void *in_match_value = NULL;
300 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
306 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
307 if (!in_match_criteria) {
312 if (acl_table->unknown_vlan_fr.applied)
313 mlx5_del_flow_table_entry(acl_table->ft,
314 acl_table->unknown_vlan_fr.fi);
316 /* Apply new unknown vlan rule */
317 MLX5_SET(flow_context, flow_context, action, new_action);
318 if (acl_table->spoofchk_enabled) {
319 in_match_value = MLX5_ADDR_OF(flow_context, flow_context,
321 smac = MLX5_ADDR_OF(fte_match_param,
323 outer_headers.smac_47_16);
324 ether_addr_copy(smac, acl_table->smac);
325 smac = MLX5_ADDR_OF(fte_match_param,
327 outer_headers.smac_47_16);
328 memset(smac, 0xff, ETH_ALEN);
330 err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
331 in_match_criteria, flow_context,
332 &acl_table->unknown_vlan_fr.fi);
334 acl_table->unknown_vlan_fr.applied = false;
336 acl_table->unknown_vlan_fr.applied = true;
337 acl_table->unknown_vlan_fr.action = new_action;
343 if (in_match_criteria)
344 vfree(in_match_criteria);
/*
 * Walk vlan_allowed_bitmap and install an ALLOW entry for every set VID.
 * No-op if the filter is already applied.  On failure, the error path
 * tears down every entry installed so far (all-or-nothing).
 * NOTE(review): lines are missing from this excerpt (loop conditions,
 * index declarations, the err_disable_vlans label, returns).
 */
348 static int mlx5_vacl_table_apply_vlan_filter(void *acl_t)
350 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
355 if (acl_table->vlan_filter_applied)
/* Install one rule per allowed VID. */
358 for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
360 index = find_next_bit(acl_table->vlan_allowed_bitmap,
362 err = mlx5_vacl_table_allow_vlan(acl_t, index);
364 goto err_disable_vlans;
367 acl_table->vlan_filter_applied = true;
/* Unwind: remove the entries installed before the failure. */
371 for (err_index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
373 err_index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
375 mlx5_del_flow_table_entry(acl_table->ft,
376 acl_table->vlan_fi_table[err_index]);
/*
 * Remove every per-VID ALLOW entry previously installed by
 * mlx5_vacl_table_apply_vlan_filter().  No-op when the filter is not
 * applied.  The allowed-VID bitmap itself is left untouched.
 * NOTE(review): loop-condition lines are missing from this excerpt.
 */
381 static void mlx5_vacl_table_disapply_vlan_filter(void *acl_t)
383 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
386 if (!acl_table->vlan_filter_applied)
389 for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
391 index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
393 mlx5_del_flow_table_entry(acl_table->ft,
394 acl_table->vlan_fi_table[index]);
397 acl_table->vlan_filter_applied = false;
/*
 * Remove every rule currently installed in the ACL table — default,
 * unknown-VLAN, loopback, untagged, and the per-VID filter entries —
 * and clear the corresponding "applied" flags.  Safe to call when some
 * or all rules are not applied.
 */
400 static void mlx5_vacl_table_disapply_all_filters(void *acl_t)
402 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
404 if (acl_table->default_fr.applied) {
405 mlx5_del_flow_table_entry(acl_table->ft,
406 acl_table->default_fr.fi);
407 acl_table->default_fr.applied = false;
409 if (acl_table->unknown_vlan_fr.applied) {
410 mlx5_del_flow_table_entry(acl_table->ft,
411 acl_table->unknown_vlan_fr.fi);
412 acl_table->unknown_vlan_fr.applied = false;
414 if (acl_table->loopback_fr.applied) {
415 mlx5_del_flow_table_entry(acl_table->ft,
416 acl_table->loopback_fr.fi);
417 acl_table->loopback_fr.applied = false;
419 if (acl_table->untagged_fr.applied) {
420 mlx5_del_flow_table_entry(acl_table->ft,
421 acl_table->untagged_fr.fi);
422 acl_table->untagged_fr.applied = false;
424 if (acl_table->vlan_filter_applied) {
425 mlx5_vacl_table_disapply_vlan_filter(acl_t);
426 acl_table->vlan_filter_applied = false;
/*
 * (Re)install every rule that should currently be active, using the
 * actions cached in each mlx_vacl_fr: default (spoofchk only),
 * unknown-VLAN, loopback (egress ACL only), untagged, and the VLAN
 * filter (if enabled).  On any failure, everything applied so far is
 * torn down via mlx5_vacl_table_disapply_all_filters().
 * NOTE(review): lines are missing from this excerpt (error checks
 * between steps, the success return, the err_disapply_all label).
 */
430 static int mlx5_vacl_table_apply_all_filters(void *acl_t)
432 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
435 if (!acl_table->default_fr.applied && acl_table->spoofchk_enabled) {
436 err = mlx5_vacl_table_apply_default(acl_table,
437 acl_table->default_fr.action);
439 goto err_disapply_all;
442 if (!acl_table->unknown_vlan_fr.applied) {
443 err = mlx5_vacl_table_apply_unknown_vlan(acl_table,
444 acl_table->unknown_vlan_fr.action);
446 goto err_disapply_all;
/* Loopback filtering only exists on the egress ACL. */
449 if (!acl_table->loopback_fr.applied &&
450 acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
451 err = mlx5_vacl_table_apply_loopback_filter(
453 acl_table->loopback_fr.action);
455 goto err_disapply_all;
458 if (!acl_table->untagged_fr.applied) {
459 err = mlx5_vacl_table_apply_untagged(acl_table,
460 acl_table->untagged_fr.action);
462 goto err_disapply_all;
465 if (!acl_table->vlan_filter_applied && acl_table->vlan_filter_enabled) {
466 err = mlx5_vacl_table_apply_vlan_filter(acl_t);
468 goto err_disapply_all;
474 mlx5_vacl_table_disapply_all_filters(acl_t);
/*
 * Tear down the ACL flow table: remove all installed rules, destroy the
 * HW flow table, and NULL the handle.  Takes the opaque acl_t table
 * handle (NOT the ft pointer) — note some callers below appear to pass
 * acl_table->ft instead; see the review notes there.
 */
480 static void mlx5_vacl_table_destroy_ft(void *acl_t)
482 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
484 mlx5_vacl_table_disapply_all_filters(acl_t);
486 mlx5_destroy_flow_table(acl_table->ft);
487 acl_table->ft = NULL;
/*
 * Build the HW flow table for this vport ACL.  Sizes the table for 4096
 * VLAN entries plus one slot per fixed rule, lays out one flow group per
 * rule class (loopback group only for egress ACLs — all other group
 * indices shift down by one on ingress), creates the table, and applies
 * all currently-configured filters.
 * Returns 0 on success, negative errno otherwise.
 * NOTE(review): many lines are missing from this excerpt (declarations
 * of err/smac/log_acl_ft_size, the kcalloc failure check, the
 * mlx5_create_flow_table argument list, error labels, kfree(g), return).
 */
490 static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
492 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
495 int groups_num = MLX5_ACL_GROUPS_NUM - 1;
496 int shift_idx = MLX5_ACL_UNTAGGED_GROUP_IDX;
498 struct mlx5_flow_table_group *g;
503 g = kcalloc(MLX5_ACL_GROUPS_NUM, sizeof(*g), GFP_KERNEL);
507 acl_table->spoofchk_enabled = spoofchk;
/* Base capacity: one entry per possible VID... */
512 log_acl_ft_size = 4096;
514 * for loopback filter rule
516 log_acl_ft_size += 1;
520 log_acl_ft_size += 1;
522 * for unknown vlan rule
524 log_acl_ft_size += 1;
528 log_acl_ft_size += 1;
/* ...then convert to a log2 size and clamp to the device maximum. */
530 log_acl_ft_size = order_base_2(log_acl_ft_size);
531 log_acl_ft_size = min_t(int, log_acl_ft_size, acl_table->max_ft_size);
533 if (log_acl_ft_size < 2)
536 if (acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
537 /* Loopback filter group */
538 g[MLX5_ACL_LOOPBACK_GROUP_IDX].log_sz = 0;
539 g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria_enable =
540 MLX5_MATCH_MISC_PARAMETERS;
541 MLX5_SET_TO_ONES(fte_match_param,
542 g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria,
543 misc_parameters.source_port);
/* Ingress ACL: no loopback group; remaining groups shift down. */
545 shift_idx = MLX5_ACL_LOOPBACK_GROUP_IDX;
547 /* Untagged traffic group */
548 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].log_sz = 0;
549 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria_enable =
550 MLX5_MATCH_OUTER_HEADERS;
551 MLX5_SET(fte_match_param,
552 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
553 outer_headers.vlan_tag, 1);
555 smac = MLX5_ADDR_OF(fte_match_param,
556 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
558 outer_headers.smac_47_16);
559 memset(smac, 0xff, ETH_ALEN);
562 /* Allowed vlans group */
563 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].log_sz = log_acl_ft_size - 1;
564 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
565 MLX5_MATCH_OUTER_HEADERS;
566 MLX5_SET(fte_match_param,
567 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
568 outer_headers.vlan_tag, 1);
569 MLX5_SET(fte_match_param,
570 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
571 outer_headers.first_vid, 0xfff);
573 smac = MLX5_ADDR_OF(fte_match_param,
574 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx]
576 outer_headers.smac_47_16);
577 memset(smac, 0xff, ETH_ALEN);
580 /* Unknown vlan traffic group */
581 g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].log_sz = 0;
582 g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
583 (spoofchk ? MLX5_MATCH_OUTER_HEADERS : 0);
587 g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx]
589 outer_headers.smac_47_16);
590 memset(smac, 0xff, ETH_ALEN);
594 * Default group - for spoofchk only.
596 g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].log_sz = 0;
597 g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].match_criteria_enable = 0;
599 acl_table->ft = mlx5_create_flow_table(acl_table->dev,
605 if (!acl_table->ft) {
610 err = mlx5_vacl_table_apply_all_filters(acl_t);
/*
 * NOTE(review): mlx5_vacl_table_destroy_ft() expects the acl_t table
 * handle, but acl_table->ft (the flow-table pointer) is passed here.
 * destroy_ft would then cast the ft pointer to struct mlx5_vacl_table *
 * — looks like a bug; verify against the flow-table API and fix to pass
 * acl_t.
 */
617 mlx5_vacl_table_destroy_ft(acl_table->ft);
618 acl_table->ft = NULL;
/*
 * Allocate and initialize a vport ACL table (ingress or egress).
 * Checks the device ft_support capability, sets the default rule
 * actions (untagged/unknown-VLAN allowed; loopback and spoofchk-default
 * dropped), creates the HW flow table with spoofchk off, and allocates
 * the 4096-bit allowed-VID bitmap.  Returns the opaque table handle, or
 * (presumably) NULL on failure — the early-return lines are not visible
 * in this excerpt.
 * NOTE(review): allocation flags for the bitmap kcalloc, error labels,
 * and the kfree on failure are among the missing lines.
 */
625 void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
626 u16 vport, bool is_egress)
628 struct mlx5_vacl_table *acl_table;
631 if (is_egress && !MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev, ft_support))
634 if (!is_egress && !MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev, ft_support))
637 acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
641 acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
642 MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
643 acl_table->max_ft_size = (is_egress ?
644 MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev,
646 MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev,
648 acl_table->dev = dev;
649 acl_table->vport = vport;
652 * default behavior : Allow and if spoofchk drop the default
654 acl_table->default_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
655 acl_table->loopback_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
656 acl_table->unknown_vlan_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
657 acl_table->untagged_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
658 err = mlx5_vacl_table_create_ft(acl_table, false);
660 goto err_free_acl_table;
662 acl_table->vlan_allowed_bitmap = kcalloc(BITS_TO_LONGS(4096),
665 if (!acl_table->vlan_allowed_bitmap)
/*
 * NOTE(review): as in create_ft's error path, destroy_ft() expects the
 * acl table handle, not acl_table->ft — verify and fix to pass
 * acl_table.
 */
671 mlx5_vacl_table_destroy_ft(acl_table->ft);
672 acl_table->ft = NULL;
679 return (void *)acl_table;
681 EXPORT_SYMBOL(mlx5_vacl_table_create);
/*
 * Destroy a vport ACL table created by mlx5_vacl_table_create(): tear
 * down the HW flow table and free the allowed-VID bitmap.
 * NOTE(review): the kfree of acl_table itself is not visible in this
 * excerpt — confirm the missing lines free the table struct.
 */
683 void mlx5_vacl_table_cleanup(void *acl_t)
685 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
687 mlx5_vacl_table_destroy_ft(acl_t);
688 kfree(acl_table->vlan_allowed_bitmap);
691 EXPORT_SYMBOL(mlx5_vacl_table_cleanup);
/*
 * Add a VLAN id to the allowed set.  Sets the bitmap bit; if the filter
 * is currently applied in HW, also installs the ALLOW flow entry.  On
 * failure the bit is cleared again so bitmap and HW stay consistent.
 * NOTE(review): early-return lines are missing from this excerpt.
 */
693 int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan)
695 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
698 if (test_bit(vlan, acl_table->vlan_allowed_bitmap))
700 __set_bit(vlan, acl_table->vlan_allowed_bitmap);
701 if (!acl_table->vlan_filter_applied)
704 err = mlx5_vacl_table_allow_vlan(acl_t, vlan);
/* Roll back the bitmap bit if the HW rule could not be installed. */
711 __clear_bit(vlan, acl_table->vlan_allowed_bitmap);
716 EXPORT_SYMBOL(mlx5_vacl_table_add_vlan);
/*
 * Remove a VLAN id from the allowed set.  Clears the bitmap bit; if the
 * filter is applied in HW, also deletes the corresponding flow entry.
 * No-op when the VID was not in the set.
 */
718 void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan)
720 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
722 if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
725 __clear_bit(vlan, acl_table->vlan_allowed_bitmap);
727 if (!acl_table->vlan_filter_applied)
730 mlx5_del_flow_table_entry(acl_table->ft,
731 acl_table->vlan_fi_table[vlan]);
733 EXPORT_SYMBOL(mlx5_vacl_table_del_vlan);
/*
 * Enable VLAN filtering for the vport: mark the filter enabled and
 * install the per-VID ALLOW entries for every bit currently set in the
 * allowed bitmap.  Returns the result of applying the filter.
 */
735 int mlx5_vacl_table_enable_vlan_filter(void *acl_t)
737 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
739 acl_table->vlan_filter_enabled = true;
740 return mlx5_vacl_table_apply_vlan_filter(acl_t);
742 EXPORT_SYMBOL(mlx5_vacl_table_enable_vlan_filter);
/*
 * Disable VLAN filtering for the vport: mark the filter disabled and
 * remove any per-VID entries currently installed in HW.  The allowed
 * bitmap is preserved for a later re-enable.
 */
744 void mlx5_vacl_table_disable_vlan_filter(void *acl_t)
746 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
748 acl_table->vlan_filter_enabled = false;
749 mlx5_vacl_table_disapply_vlan_filter(acl_t);
751 EXPORT_SYMBOL(mlx5_vacl_table_disable_vlan_filter);
/* Switch the untagged-traffic rule to DROP.  Thin wrapper over
 * mlx5_vacl_table_apply_untagged(). */
753 int mlx5_vacl_table_drop_untagged(void *acl_t)
755 return mlx5_vacl_table_apply_untagged(acl_t,
756 MLX5_FLOW_CONTEXT_ACTION_DROP);
758 EXPORT_SYMBOL(mlx5_vacl_table_drop_untagged);
/* Switch the untagged-traffic rule to ALLOW.  Thin wrapper over
 * mlx5_vacl_table_apply_untagged(). */
760 int mlx5_vacl_table_allow_untagged(void *acl_t)
762 return mlx5_vacl_table_apply_untagged(acl_t,
763 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
765 EXPORT_SYMBOL(mlx5_vacl_table_allow_untagged);
/* Switch the unknown-VLAN rule to DROP.  Thin wrapper over
 * mlx5_vacl_table_apply_unknown_vlan(). */
767 int mlx5_vacl_table_drop_unknown_vlan(void *acl_t)
769 return mlx5_vacl_table_apply_unknown_vlan(acl_t,
770 MLX5_FLOW_CONTEXT_ACTION_DROP);
772 EXPORT_SYMBOL(mlx5_vacl_table_drop_unknown_vlan);
/* Switch the unknown-VLAN rule to ALLOW.  Thin wrapper over
 * mlx5_vacl_table_apply_unknown_vlan(). */
774 int mlx5_vacl_table_allow_unknown_vlan(void *acl_t)
776 return mlx5_vacl_table_apply_unknown_vlan(acl_t,
777 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
779 EXPORT_SYMBOL(mlx5_vacl_table_allow_unknown_vlan);
/*
 * Update the spoof-check setting and/or the vport MAC used by the
 * MAC-matching rules.  Early-out when nothing changed.  Toggling
 * spoofchk requires rebuilding the flow table (group layout differs);
 * a MAC-only change just removes and reapplies all rules.
 * NOTE(review): lines are missing from this excerpt (early return,
 * err declaration, final return).
 */
781 int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac)
783 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
/* No-op when the state (and, if enabled, the MAC) is unchanged. */
786 if (spoofchk == acl_table->spoofchk_enabled) {
788 (spoofchk && !memcmp(acl_table->smac, vport_mac, ETH_ALEN)))
792 ether_addr_copy(acl_table->smac, vport_mac);
793 if (spoofchk != acl_table->spoofchk_enabled) {
/* Group layout depends on spoofchk: recreate the whole table. */
794 mlx5_vacl_table_destroy_ft(acl_t);
795 err = mlx5_vacl_table_create_ft(acl_t, spoofchk);
/* Same layout, new MAC: just reinstall the rules. */
797 mlx5_vacl_table_disapply_all_filters(acl_t);
798 err = mlx5_vacl_table_apply_all_filters(acl_t);
803 EXPORT_SYMBOL(mlx5_vacl_table_set_spoofchk);