2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/flow_table.h>
31 #include <dev/mlx5/eswitch_vacl.h>
32 #include "mlx5_core.h"
/*
 * Flow-group indices inside a vport ACL flow table.  For ingress ACLs the
 * loopback group is not created and all later indices are shifted down by
 * MLX5_ACL_UNTAGGED_GROUP_IDX (see mlx5_vacl_table_create_ft()).
 * MLX5_ACL_GROUPS_NUM is a count sentinel, not a real group.
 */
enum {
	MLX5_ACL_LOOPBACK_GROUP_IDX	= 0,
	MLX5_ACL_UNTAGGED_GROUP_IDX	= 1,
	MLX5_ACL_VLAN_GROUP_IDX		= 2,
	MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX	= 3,
	MLX5_ACL_DEFAULT_GROUP_IDX	= 4,
	MLX5_ACL_GROUPS_NUM,
};
49 struct mlx5_vacl_table {
50 struct mlx5_core_dev *dev;
56 struct mlx_vacl_fr loopback_fr;
57 struct mlx_vacl_fr untagged_fr;
58 struct mlx_vacl_fr unknown_vlan_fr;
59 struct mlx_vacl_fr default_fr;
61 bool vlan_filter_enabled;
62 bool vlan_filter_applied;
63 unsigned long *vlan_allowed_bitmap;
64 u32 vlan_fi_table[4096];
66 bool spoofchk_enabled;
70 static int mlx5_vacl_table_allow_vlan(void *acl_t, u16 vlan)
72 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
73 u32 *flow_context = NULL;
74 void *in_match_criteria = NULL;
75 void *in_match_value = NULL;
77 int vlan_mc_enable = MLX5_MATCH_OUTER_HEADERS;
80 if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
83 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
89 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
90 if (!in_match_criteria) {
96 MLX5_SET(flow_context, flow_context, action,
97 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
98 in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
99 MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 1);
100 MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
102 MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
103 MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
105 if (acl_table->spoofchk_enabled) {
106 smac = MLX5_ADDR_OF(fte_match_param,
108 outer_headers.smac_47_16);
109 ether_addr_copy(smac, acl_table->smac);
110 smac = MLX5_ADDR_OF(fte_match_param,
112 outer_headers.smac_47_16);
113 memset(smac, 0xff, ETH_ALEN);
115 err = mlx5_add_flow_table_entry(acl_table->ft, vlan_mc_enable,
116 in_match_criteria, flow_context,
117 &acl_table->vlan_fi_table[vlan]);
121 if (in_match_criteria)
122 vfree(in_match_criteria);
126 static int mlx5_vacl_table_apply_loopback_filter(void *acl_t, u16 new_action)
128 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
129 u8 loopback_mc_enable = MLX5_MATCH_MISC_PARAMETERS;
130 u32 *flow_context = NULL;
131 void *in_match_criteria = NULL;
132 void *in_match_value = NULL;
133 void *mv_misc = NULL;
134 void *mc_misc = NULL;
137 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
143 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
144 if (!in_match_criteria) {
149 if (acl_table->loopback_fr.applied)
150 mlx5_del_flow_table_entry(acl_table->ft,
151 acl_table->loopback_fr.fi);
153 /* Apply new loopback rule */
154 MLX5_SET(flow_context, flow_context, action, new_action);
155 in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
156 mv_misc = MLX5_ADDR_OF(fte_match_param, in_match_value,
158 mc_misc = MLX5_ADDR_OF(fte_match_param, in_match_criteria,
160 MLX5_SET(fte_match_set_misc, mv_misc, source_port, acl_table->vport);
162 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
164 err = mlx5_add_flow_table_entry(acl_table->ft, loopback_mc_enable,
165 in_match_criteria, flow_context,
166 &acl_table->loopback_fr.fi);
168 acl_table->loopback_fr.applied = false;
170 acl_table->loopback_fr.applied = true;
171 acl_table->loopback_fr.action = new_action;
177 if (in_match_criteria)
178 vfree(in_match_criteria);
182 static int mlx5_vacl_table_apply_default(void *acl_t, u16 new_action)
184 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
185 u8 default_mc_enable = 0;
186 u32 *flow_context = NULL;
187 void *in_match_criteria = NULL;
190 if (!acl_table->spoofchk_enabled)
193 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
199 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
200 if (!in_match_criteria) {
205 if (acl_table->default_fr.applied)
206 mlx5_del_flow_table_entry(acl_table->ft,
207 acl_table->default_fr.fi);
209 /* Apply new default rule */
210 MLX5_SET(flow_context, flow_context, action, new_action);
211 err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
212 in_match_criteria, flow_context,
213 &acl_table->default_fr.fi);
215 acl_table->default_fr.applied = false;
217 acl_table->default_fr.applied = true;
218 acl_table->default_fr.action = new_action;
224 if (in_match_criteria)
225 vfree(in_match_criteria);
229 static int mlx5_vacl_table_apply_untagged(void *acl_t, u16 new_action)
231 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
232 u8 untagged_mc_enable = MLX5_MATCH_OUTER_HEADERS;
234 u32 *flow_context = NULL;
235 void *in_match_criteria = NULL;
236 void *in_match_value = NULL;
239 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
245 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
246 if (!in_match_criteria) {
251 if (acl_table->untagged_fr.applied)
252 mlx5_del_flow_table_entry(acl_table->ft,
253 acl_table->untagged_fr.fi);
255 /* Apply new untagged rule */
256 MLX5_SET(flow_context, flow_context, action, new_action);
257 in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
258 MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 0);
259 MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
260 if (acl_table->spoofchk_enabled) {
261 smac = MLX5_ADDR_OF(fte_match_param,
263 outer_headers.smac_47_16);
264 ether_addr_copy(smac, acl_table->smac);
265 smac = MLX5_ADDR_OF(fte_match_param,
267 outer_headers.smac_47_16);
268 memset(smac, 0xff, ETH_ALEN);
270 err = mlx5_add_flow_table_entry(acl_table->ft, untagged_mc_enable,
271 in_match_criteria, flow_context,
272 &acl_table->untagged_fr.fi);
274 acl_table->untagged_fr.applied = false;
276 acl_table->untagged_fr.applied = true;
277 acl_table->untagged_fr.action = new_action;
283 if (in_match_criteria)
284 vfree(in_match_criteria);
288 static int mlx5_vacl_table_apply_unknown_vlan(void *acl_t, u16 new_action)
290 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
291 u8 default_mc_enable = (!acl_table->spoofchk_enabled) ? 0 :
292 MLX5_MATCH_OUTER_HEADERS;
293 u32 *flow_context = NULL;
294 void *in_match_criteria = NULL;
295 void *in_match_value = NULL;
299 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
305 in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
306 if (!in_match_criteria) {
311 if (acl_table->unknown_vlan_fr.applied)
312 mlx5_del_flow_table_entry(acl_table->ft,
313 acl_table->unknown_vlan_fr.fi);
315 /* Apply new unknown vlan rule */
316 MLX5_SET(flow_context, flow_context, action, new_action);
317 if (acl_table->spoofchk_enabled) {
318 in_match_value = MLX5_ADDR_OF(flow_context, flow_context,
320 smac = MLX5_ADDR_OF(fte_match_param,
322 outer_headers.smac_47_16);
323 ether_addr_copy(smac, acl_table->smac);
324 smac = MLX5_ADDR_OF(fte_match_param,
326 outer_headers.smac_47_16);
327 memset(smac, 0xff, ETH_ALEN);
329 err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
330 in_match_criteria, flow_context,
331 &acl_table->unknown_vlan_fr.fi);
333 acl_table->unknown_vlan_fr.applied = false;
335 acl_table->unknown_vlan_fr.applied = true;
336 acl_table->unknown_vlan_fr.action = new_action;
342 if (in_match_criteria)
343 vfree(in_match_criteria);
347 static int mlx5_vacl_table_apply_vlan_filter(void *acl_t)
349 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
354 if (acl_table->vlan_filter_applied)
357 for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
359 index = find_next_bit(acl_table->vlan_allowed_bitmap,
361 err = mlx5_vacl_table_allow_vlan(acl_t, index);
363 goto err_disable_vlans;
366 acl_table->vlan_filter_applied = true;
370 for (err_index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
372 err_index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
374 mlx5_del_flow_table_entry(acl_table->ft,
375 acl_table->vlan_fi_table[err_index]);
380 static void mlx5_vacl_table_disapply_vlan_filter(void *acl_t)
382 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
385 if (!acl_table->vlan_filter_applied)
388 for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
390 index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
392 mlx5_del_flow_table_entry(acl_table->ft,
393 acl_table->vlan_fi_table[index]);
396 acl_table->vlan_filter_applied = false;
399 static void mlx5_vacl_table_disapply_all_filters(void *acl_t)
401 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
403 if (acl_table->default_fr.applied) {
404 mlx5_del_flow_table_entry(acl_table->ft,
405 acl_table->default_fr.fi);
406 acl_table->default_fr.applied = false;
408 if (acl_table->unknown_vlan_fr.applied) {
409 mlx5_del_flow_table_entry(acl_table->ft,
410 acl_table->unknown_vlan_fr.fi);
411 acl_table->unknown_vlan_fr.applied = false;
413 if (acl_table->loopback_fr.applied) {
414 mlx5_del_flow_table_entry(acl_table->ft,
415 acl_table->loopback_fr.fi);
416 acl_table->loopback_fr.applied = false;
418 if (acl_table->untagged_fr.applied) {
419 mlx5_del_flow_table_entry(acl_table->ft,
420 acl_table->untagged_fr.fi);
421 acl_table->untagged_fr.applied = false;
423 if (acl_table->vlan_filter_applied) {
424 mlx5_vacl_table_disapply_vlan_filter(acl_t);
425 acl_table->vlan_filter_applied = false;
429 static int mlx5_vacl_table_apply_all_filters(void *acl_t)
431 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
434 if (!acl_table->default_fr.applied && acl_table->spoofchk_enabled) {
435 err = mlx5_vacl_table_apply_default(acl_table,
436 acl_table->default_fr.action);
438 goto err_disapply_all;
441 if (!acl_table->unknown_vlan_fr.applied) {
442 err = mlx5_vacl_table_apply_unknown_vlan(acl_table,
443 acl_table->unknown_vlan_fr.action);
445 goto err_disapply_all;
448 if (!acl_table->loopback_fr.applied &&
449 acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
450 err = mlx5_vacl_table_apply_loopback_filter(
452 acl_table->loopback_fr.action);
454 goto err_disapply_all;
457 if (!acl_table->untagged_fr.applied) {
458 err = mlx5_vacl_table_apply_untagged(acl_table,
459 acl_table->untagged_fr.action);
461 goto err_disapply_all;
464 if (!acl_table->vlan_filter_applied && acl_table->vlan_filter_enabled) {
465 err = mlx5_vacl_table_apply_vlan_filter(acl_t);
467 goto err_disapply_all;
473 mlx5_vacl_table_disapply_all_filters(acl_t);
479 static void mlx5_vacl_table_destroy_ft(void *acl_t)
481 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
483 mlx5_vacl_table_disapply_all_filters(acl_t);
485 mlx5_destroy_flow_table(acl_table->ft);
486 acl_table->ft = NULL;
489 static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
491 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
494 int groups_num = MLX5_ACL_GROUPS_NUM - 1;
495 int shift_idx = MLX5_ACL_UNTAGGED_GROUP_IDX;
497 struct mlx5_flow_table_group *g;
502 g = kcalloc(MLX5_ACL_GROUPS_NUM, sizeof(*g), GFP_KERNEL);
506 acl_table->spoofchk_enabled = spoofchk;
511 log_acl_ft_size = 4096;
513 * for loopback filter rule
515 log_acl_ft_size += 1;
519 log_acl_ft_size += 1;
521 * for unknown vlan rule
523 log_acl_ft_size += 1;
527 log_acl_ft_size += 1;
529 log_acl_ft_size = order_base_2(log_acl_ft_size);
530 log_acl_ft_size = min_t(int, log_acl_ft_size, acl_table->max_ft_size);
532 if (log_acl_ft_size < 2)
535 if (acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
536 /* Loopback filter group */
537 g[MLX5_ACL_LOOPBACK_GROUP_IDX].log_sz = 0;
538 g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria_enable =
539 MLX5_MATCH_MISC_PARAMETERS;
540 MLX5_SET_TO_ONES(fte_match_param,
541 g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria,
542 misc_parameters.source_port);
544 shift_idx = MLX5_ACL_LOOPBACK_GROUP_IDX;
546 /* Untagged traffic group */
547 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].log_sz = 0;
548 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria_enable =
549 MLX5_MATCH_OUTER_HEADERS;
550 MLX5_SET(fte_match_param,
551 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
552 outer_headers.vlan_tag, 1);
554 smac = MLX5_ADDR_OF(fte_match_param,
555 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
557 outer_headers.smac_47_16);
558 memset(smac, 0xff, ETH_ALEN);
561 /* Allowed vlans group */
562 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].log_sz = log_acl_ft_size - 1;
563 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
564 MLX5_MATCH_OUTER_HEADERS;
565 MLX5_SET(fte_match_param,
566 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
567 outer_headers.vlan_tag, 1);
568 MLX5_SET(fte_match_param,
569 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
570 outer_headers.first_vid, 0xfff);
572 smac = MLX5_ADDR_OF(fte_match_param,
573 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx]
575 outer_headers.smac_47_16);
576 memset(smac, 0xff, ETH_ALEN);
579 /* Unknown vlan traffic group */
580 g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].log_sz = 0;
581 g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
582 (spoofchk ? MLX5_MATCH_OUTER_HEADERS : 0);
586 g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx]
588 outer_headers.smac_47_16);
589 memset(smac, 0xff, ETH_ALEN);
593 * Default group - for spoofchk only.
595 g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].log_sz = 0;
596 g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].match_criteria_enable = 0;
598 acl_table->ft = mlx5_create_flow_table(acl_table->dev,
604 if (!acl_table->ft) {
609 err = mlx5_vacl_table_apply_all_filters(acl_t);
616 mlx5_vacl_table_destroy_ft(acl_table->ft);
617 acl_table->ft = NULL;
624 void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
625 u16 vport, bool is_egress)
627 struct mlx5_vacl_table *acl_table;
630 if (is_egress && !MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev, ft_support))
633 if (!is_egress && !MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev, ft_support))
636 acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
640 acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
641 MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
642 acl_table->max_ft_size = (is_egress ?
643 MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev,
645 MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev,
647 acl_table->dev = dev;
648 acl_table->vport = vport;
651 * default behavior : Allow and if spoofchk drop the default
653 acl_table->default_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
654 acl_table->loopback_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
655 acl_table->unknown_vlan_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
656 acl_table->untagged_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
657 err = mlx5_vacl_table_create_ft(acl_table, false);
659 goto err_free_acl_table;
661 acl_table->vlan_allowed_bitmap = kcalloc(BITS_TO_LONGS(4096),
664 if (!acl_table->vlan_allowed_bitmap)
670 mlx5_vacl_table_destroy_ft(acl_table->ft);
671 acl_table->ft = NULL;
678 return (void *)acl_table;
680 EXPORT_SYMBOL(mlx5_vacl_table_create);
682 void mlx5_vacl_table_cleanup(void *acl_t)
684 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
686 mlx5_vacl_table_destroy_ft(acl_t);
687 kfree(acl_table->vlan_allowed_bitmap);
690 EXPORT_SYMBOL(mlx5_vacl_table_cleanup);
692 int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan)
694 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
697 if (test_bit(vlan, acl_table->vlan_allowed_bitmap))
699 __set_bit(vlan, acl_table->vlan_allowed_bitmap);
700 if (!acl_table->vlan_filter_applied)
703 err = mlx5_vacl_table_allow_vlan(acl_t, vlan);
710 __clear_bit(vlan, acl_table->vlan_allowed_bitmap);
715 EXPORT_SYMBOL(mlx5_vacl_table_add_vlan);
717 void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan)
719 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
721 if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
724 __clear_bit(vlan, acl_table->vlan_allowed_bitmap);
726 if (!acl_table->vlan_filter_applied)
729 mlx5_del_flow_table_entry(acl_table->ft,
730 acl_table->vlan_fi_table[vlan]);
732 EXPORT_SYMBOL(mlx5_vacl_table_del_vlan);
734 int mlx5_vacl_table_enable_vlan_filter(void *acl_t)
736 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
738 acl_table->vlan_filter_enabled = true;
739 return mlx5_vacl_table_apply_vlan_filter(acl_t);
741 EXPORT_SYMBOL(mlx5_vacl_table_enable_vlan_filter);
743 void mlx5_vacl_table_disable_vlan_filter(void *acl_t)
745 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
747 acl_table->vlan_filter_enabled = false;
748 mlx5_vacl_table_disapply_vlan_filter(acl_t);
750 EXPORT_SYMBOL(mlx5_vacl_table_disable_vlan_filter);
752 int mlx5_vacl_table_drop_untagged(void *acl_t)
754 return mlx5_vacl_table_apply_untagged(acl_t,
755 MLX5_FLOW_CONTEXT_ACTION_DROP);
757 EXPORT_SYMBOL(mlx5_vacl_table_drop_untagged);
759 int mlx5_vacl_table_allow_untagged(void *acl_t)
761 return mlx5_vacl_table_apply_untagged(acl_t,
762 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
764 EXPORT_SYMBOL(mlx5_vacl_table_allow_untagged);
766 int mlx5_vacl_table_drop_unknown_vlan(void *acl_t)
768 return mlx5_vacl_table_apply_unknown_vlan(acl_t,
769 MLX5_FLOW_CONTEXT_ACTION_DROP);
771 EXPORT_SYMBOL(mlx5_vacl_table_drop_unknown_vlan);
773 int mlx5_vacl_table_allow_unknown_vlan(void *acl_t)
775 return mlx5_vacl_table_apply_unknown_vlan(acl_t,
776 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
778 EXPORT_SYMBOL(mlx5_vacl_table_allow_unknown_vlan);
780 int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac)
782 struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
785 if (spoofchk == acl_table->spoofchk_enabled) {
787 (spoofchk && !memcmp(acl_table->smac, vport_mac, ETH_ALEN)))
791 ether_addr_copy(acl_table->smac, vport_mac);
792 if (spoofchk != acl_table->spoofchk_enabled) {
793 mlx5_vacl_table_destroy_ft(acl_t);
794 err = mlx5_vacl_table_create_ft(acl_t, spoofchk);
796 mlx5_vacl_table_disapply_all_filters(acl_t);
797 err = mlx5_vacl_table_apply_all_filters(acl_t);
802 EXPORT_SYMBOL(mlx5_vacl_table_set_spoofchk);