/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "en.h"

#include <linux/list.h>
#include <dev/mlx5/fs.h>
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

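/*
 * Receive flow steering for mlx5en, summarized from the code below:
 * packets first hit the "vlan" flow table (untagged/any-tag/per-VID
 * rules), which forwards to the "main" table, where DMAC, ethertype
 * and ip_protocol rules spread traffic across the per-traffic-type
 * TIRs; a separate "inner_rss" table matches the inner headers of
 * tunneled traffic.
 */
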
enum {
        MLX5E_FULLMATCH = 0,
        MLX5E_ALLMULTI = 1,
        MLX5E_PROMISC = 2,
};

enum {
        MLX5E_UC = 0,
        MLX5E_MC_IPV4 = 1,
        MLX5E_MC_IPV6 = 2,
        MLX5E_MC_OTHER = 3,
};

enum {
        MLX5E_ACTION_NONE = 0,
        MLX5E_ACTION_ADD = 1,
        MLX5E_ACTION_DEL = 2,
};

struct mlx5e_eth_addr_hash_node {
        LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
        u8      action;
        struct mlx5e_eth_addr_info ai;
};

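/*
 * Trivial hash: the last byte of the MAC address is the bucket index,
 * which presumably assumes MLX5E_ETH_ADDR_HASH_SIZE is 256.
 */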
static inline int
mlx5e_hash_eth_addr(const u8 *addr)
{
        return (addr[5]);
}

static void
mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
    const u8 *addr)
{
        struct mlx5e_eth_addr_hash_node *hn;
        int ix = mlx5e_hash_eth_addr(addr);

        LIST_FOREACH(hn, &hash[ix], hlist) {
                if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
                        if (hn->action == MLX5E_ACTION_DEL)
                                hn->action = MLX5E_ACTION_NONE;
                        return;
                }
        }

        hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
        if (hn == NULL)
                return;

        ether_addr_copy(hn->ai.addr, addr);
        hn->action = MLX5E_ACTION_ADD;

        LIST_INSERT_HEAD(&hash[ix], hn, hlist);
}

static void
mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
{
        LIST_REMOVE(hn, hlist);
        free(hn, M_MLX5EN);
}

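/*
 * Tear down every flow rule recorded in ai->tt_vec, most-specific
 * traffic types first (the reverse of the order they are added in
 * mlx5e_add_eth_addr_rule_sub() below).
 */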
static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai)
{
        if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_ESP))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);

        if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_ESP))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);

        if (ai->tt_vec & (1 << MLX5E_TT_IPV6_IPSEC_AH))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);

        if (ai->tt_vec & (1 << MLX5E_TT_IPV4_IPSEC_AH))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);

        if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);

        if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);

        if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);

        if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);

        if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);

        if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);

        if (ai->tt_vec & (1 << MLX5E_TT_ANY))
                mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
}

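/*
 * Classify an Ethernet address: unicast, IPv4 multicast (the
 * 01:00:5e prefix with the high bit of the fourth byte clear),
 * IPv6 multicast (33:33:xx:xx:xx:xx), or other multicast.
 */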
static int
mlx5e_get_eth_addr_type(const u8 *addr)
{
        if (ETHER_IS_MULTICAST(addr) == 0)
                return (MLX5E_UC);

        if ((addr[0] == 0x01) &&
            (addr[1] == 0x00) &&
            (addr[2] == 0x5e) &&
            !(addr[3] & 0x80))
                return (MLX5E_MC_IPV4);

        if ((addr[0] == 0x33) &&
            (addr[1] == 0x33))
                return (MLX5E_MC_IPV6);

        return (MLX5E_MC_OTHER);
}

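/*
 * Compute the set of traffic types (a bitmask of MLX5E_TT_* values)
 * that steering rules should be installed for, given the match type
 * and, for full matches, the address class.  Multicast addresses only
 * need the UDP and raw-IP traffic types of their protocol family.
 */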
static u32
mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
{
        int eth_addr_type;
        u32 ret;

        switch (type) {
        case MLX5E_FULLMATCH:
                eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
                switch (eth_addr_type) {
                case MLX5E_UC:
                        ret =
                            (1 << MLX5E_TT_IPV4_TCP) |
                            (1 << MLX5E_TT_IPV6_TCP) |
                            (1 << MLX5E_TT_IPV4_UDP) |
                            (1 << MLX5E_TT_IPV6_UDP) |
                            (1 << MLX5E_TT_IPV4) |
                            (1 << MLX5E_TT_IPV6) |
                            (1 << MLX5E_TT_ANY) |
                            0;
                        break;

                case MLX5E_MC_IPV4:
                        ret =
                            (1 << MLX5E_TT_IPV4_UDP) |
                            (1 << MLX5E_TT_IPV4) |
                            0;
                        break;

                case MLX5E_MC_IPV6:
                        ret =
                            (1 << MLX5E_TT_IPV6_UDP) |
                            (1 << MLX5E_TT_IPV6) |
                            0;
                        break;

                default:
                        ret =
                            (1 << MLX5E_TT_ANY) |
                            0;
                        break;
                }
                break;

        case MLX5E_ALLMULTI:
                ret =
                    (1 << MLX5E_TT_IPV4_UDP) |
                    (1 << MLX5E_TT_IPV6_UDP) |
                    (1 << MLX5E_TT_IPV4) |
                    (1 << MLX5E_TT_IPV6) |
                    (1 << MLX5E_TT_ANY) |
                    0;
                break;

        default:                        /* MLX5E_PROMISC */
                ret =
                    (1 << MLX5E_TT_IPV4_TCP) |
                    (1 << MLX5E_TT_IPV6_TCP) |
                    (1 << MLX5E_TT_IPV4_UDP) |
                    (1 << MLX5E_TT_IPV6_UDP) |
                    (1 << MLX5E_TT_IPV4) |
                    (1 << MLX5E_TT_IPV6) |
                    (1 << MLX5E_TT_ANY) |
                    0;
                break;
        }

        return (ret);
}

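/*
 * Install one flow rule per traffic type selected by the tt_vec.  The
 * match criteria/value buffers (mc/mv) are refined incrementally: the
 * MLX5E_TT_ANY rule is added first with the loosest match, then the
 * ethertype and finally the ip_protocol fields are enabled for the
 * more specific rules, so the order of the blocks below matters.
 */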
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type,
    u32 *mc, u32 *mv)
{
        struct mlx5_flow_destination dest;
        u8 mc_enable = 0;
        struct mlx5_flow_rule **rule_p;
        struct mlx5_flow_table *ft = priv->fts.main.t;
        u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
                                   outer_headers.dmac_47_16);
        u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
                                   outer_headers.dmac_47_16);
        u32 *tirn = priv->tirn;
        u32 tt_vec;
        int err = 0;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;

        switch (type) {
        case MLX5E_FULLMATCH:
                mc_enable = MLX5_MATCH_OUTER_HEADERS;
                memset(mc_dmac, 0xff, ETH_ALEN);
                ether_addr_copy(mv_dmac, ai->addr);
                break;

        case MLX5E_ALLMULTI:
                mc_enable = MLX5_MATCH_OUTER_HEADERS;
                mc_dmac[0] = 0x01;
                mv_dmac[0] = 0x01;
                break;

        case MLX5E_PROMISC:
                break;
        default:
                break;
        }

        tt_vec = mlx5e_get_tt_vec(ai, type);

        if (tt_vec & BIT(MLX5E_TT_ANY)) {
                rule_p = &ai->ft_rule[MLX5E_TT_ANY];
                dest.tir_num = tirn[MLX5E_TT_ANY];
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_ANY);
        }

        /* All of the remaining rules also match on the ethertype. */
        mc_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);

        if (tt_vec & BIT(MLX5E_TT_IPV4)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
                dest.tir_num = tirn[MLX5E_TT_IPV4];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IP);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV4);
        }

        if (tt_vec & BIT(MLX5E_TT_IPV6)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
                dest.tir_num = tirn[MLX5E_TT_IPV6];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IPV6);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV6);
        }

        /* Narrow the match further to the IP protocol; UDP rules first. */
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);

        if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
                dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IP);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
        }

        if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
                dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IPV6);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
        }

        MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);

        if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
                dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IP);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
        }

        if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
                dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IPV6);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
        }

        MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);

        if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
                dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IP);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
        }

        if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
                dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IPV6);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
        }

        MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);

        if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
                dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IP);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
        }

        if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
                rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
                dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
                MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
                         ETHERTYPE_IPV6);
                *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                             MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                             MLX5_FS_ETH_FLOW_TAG, &dest);
                if (IS_ERR_OR_NULL(*rule_p))
                        goto err_del_ai;
                ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
        }

        return (0);

err_del_ai:
        err = PTR_ERR(*rule_p);
        *rule_p = NULL;
        mlx5e_del_eth_addr_from_flow_table(priv, ai);

        return (err);
}

static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_info *ai, int type)
{
        u32 *match_criteria;
        u32 *match_value;
        int err = 0;

        match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        if (!match_value || !match_criteria) {
                if_printf(priv->ifp, "%s: alloc failed\n", __func__);
                err = -ENOMEM;
                goto add_eth_addr_rule_out;
        }
        err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
            match_value);

add_eth_addr_rule_out:
        kvfree(match_criteria);
        kvfree(match_value);

        return (err);
}

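/*
 * Push the list of active VLAN IDs into the NIC vport context.  The
 * device caps the list length (log_max_vlan_list); any excess VLANs
 * are silently dropped after a console warning.
 */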
static int
mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
        struct ifnet *ifp = priv->ifp;
        int max_list_size;
        int list_size;
        u16 *vlans;
        int vlan;
        int err;
        int i;

        list_size = 0;
        for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
                list_size++;

        max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

        if (list_size > max_list_size) {
                if_printf(ifp,
                    "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
                    list_size, max_list_size);
                list_size = max_list_size;
        }

        vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
        if (!vlans)
                return (-ENOMEM);

        i = 0;
        for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
                if (i >= list_size)
                        break;
                vlans[i++] = vlan;
        }

        err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
        if (err)
                if_printf(ifp, "Failed to modify vport vlans list err(%d)\n",
                    err);

        kfree(vlans);
        return (err);
}

enum mlx5e_vlan_rule_type {
        MLX5E_VLAN_RULE_TYPE_UNTAGGED,
        MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
        MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
        MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

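/*
 * Build one rule in the VLAN flow table: untagged, any C-tag, any
 * S-tag, or an exact C-tag VID match.  All rules forward to the main
 * flow table.
 */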
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid,
    u32 *mc, u32 *mv)
{
        struct mlx5_flow_table *ft = priv->fts.vlan.t;
        struct mlx5_flow_destination dest;
        u8 mc_enable = 0;
        struct mlx5_flow_rule **rule_p;
        int err = 0;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = priv->fts.main.t;

        mc_enable = MLX5_MATCH_OUTER_HEADERS;

        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                rule_p = &priv->vlan.untagged_ft_rule;
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
                rule_p = &priv->vlan.any_cvlan_ft_rule;
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
                MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
                rule_p = &priv->vlan.any_svlan_ft_rule;
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
                MLX5_SET(fte_match_param, mv, outer_headers.svlan_tag, 1);
                break;
        default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
                rule_p = &priv->vlan.active_vlans_ft_rule[vid];
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
                MLX5_SET(fte_match_param, mv, outer_headers.cvlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
                MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
                mlx5e_vport_context_update_vlans(priv);
                break;
        }

        *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
                                     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                     MLX5_FS_ETH_FLOW_TAG,
                                     &dest);

        if (IS_ERR(*rule_p)) {
                err = PTR_ERR(*rule_p);
                *rule_p = NULL;
                if_printf(priv->ifp, "%s: add rule failed\n", __func__);
        }

        return (err);
}

static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        u32 *match_criteria;
        u32 *match_value;
        int err = 0;

        match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
        if (!match_value || !match_criteria) {
                if_printf(priv->ifp, "%s: alloc failed\n", __func__);
                err = -ENOMEM;
                goto add_vlan_rule_out;
        }

        err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
            match_value);

add_vlan_rule_out:
        kvfree(match_criteria);
        kvfree(match_value);

        return (err);
}

static void
mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
    enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                if (priv->vlan.untagged_ft_rule) {
                        mlx5_del_flow_rule(priv->vlan.untagged_ft_rule);
                        priv->vlan.untagged_ft_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
                if (priv->vlan.any_cvlan_ft_rule) {
                        mlx5_del_flow_rule(priv->vlan.any_cvlan_ft_rule);
                        priv->vlan.any_cvlan_ft_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
                if (priv->vlan.any_svlan_ft_rule) {
                        mlx5_del_flow_rule(priv->vlan.any_svlan_ft_rule);
                        priv->vlan.any_svlan_ft_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
                if (priv->vlan.active_vlans_ft_rule[vid]) {
                        mlx5_del_flow_rule(priv->vlan.active_vlans_ft_rule[vid]);
                        priv->vlan.active_vlans_ft_rule[vid] = NULL;
                }
                mlx5e_vport_context_update_vlans(priv);
                break;
        default:
                break;
        }
}

static void
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int
mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
        if (err)
                return (err);

        return (mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0));
}

void
mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
        if (priv->vlan.filter_disabled) {
                priv->vlan.filter_disabled = false;
                if (priv->ifp->if_flags & IFF_PROMISC)
                        return;
                if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                        mlx5e_del_any_vid_rules(priv);
        }
}

void
mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
        if (!priv->vlan.filter_disabled) {
                priv->vlan.filter_disabled = true;
                if (priv->ifp->if_flags & IFF_PROMISC)
                        return;
                if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                        mlx5e_add_any_vid_rules(priv);
        }
}

void
mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
{
        struct mlx5e_priv *priv = arg;

        if (ifp != priv->ifp)
                return;

        PRIV_LOCK(priv);
        if (!test_and_set_bit(vid, priv->vlan.active_vlans) &&
            test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
        PRIV_UNLOCK(priv);
}

void
mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
{
        struct mlx5e_priv *priv = arg;

        if (ifp != priv->ifp)
                return;

        PRIV_LOCK(priv);
        clear_bit(vid, priv->vlan.active_vlans);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
        PRIV_UNLOCK(priv);
}

int
mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
{
        int err;
        int i;

        /*
         * VID 0 is kept permanently active, presumably so that
         * priority-tagged frames are always accepted.
         */
        set_bit(0, priv->vlan.active_vlans);
        for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID) {
                err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
                                          i);
                if (err)
                        return (err);
        }

        err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
        if (err)
                return (err);

        if (priv->vlan.filter_disabled) {
                err = mlx5e_add_any_vid_rules(priv);
                if (err)
                        return (err);
        }
        return (0);
}

void
mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
{
        int i;

        if (priv->vlan.filter_disabled)
                mlx5e_del_any_vid_rules(priv);

        mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

        for_each_set_bit(i, priv->vlan.active_vlans, VLAN_N_VID)
                mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
        clear_bit(0, priv->vlan.active_vlans);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
        for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
                LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)

static void
mlx5e_execute_action(struct mlx5e_priv *priv,
    struct mlx5e_eth_addr_hash_node *hn)
{
        switch (hn->action) {
        case MLX5E_ACTION_ADD:
                mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
                hn->action = MLX5E_ACTION_NONE;
                break;

        case MLX5E_ACTION_DEL:
                mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
                mlx5e_del_eth_addr_from_hash(hn);
                break;

        default:
                break;
        }
}

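/*
 * Snapshot the interface's unicast and multicast address lists into
 * the driver's hash tables; entries already present are kept alive by
 * mlx5e_add_eth_addr_to_hash() flipping MLX5E_ACTION_DEL back to
 * MLX5E_ACTION_NONE.
 */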
static void
mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
{
        struct ifnet *ifp = priv->ifp;
        struct ifaddr *ifa;
        struct ifmultiaddr *ifma;

        /* XXX adding this entry might not be needed */
        mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
            LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));

        if_addr_rlock(ifp);
        CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
                if (ifa->ifa_addr->sa_family != AF_LINK)
                        continue;
                mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
                    LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
        }
        if_addr_runlock(ifp);

        if_maddr_rlock(ifp);
        CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
        }
        if_maddr_runlock(ifp);
}

static void
mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
    u8 addr_array[][ETH_ALEN], int size)
{
        bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
        struct ifnet *ifp = priv->ifp;
        struct mlx5e_eth_addr_hash_node *hn;
        struct mlx5e_eth_addr_hash_head *addr_list;
        struct mlx5e_eth_addr_hash_node *tmp;
        int i = 0;
        int hi;

        addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;

        if (is_uc) /* Make sure our own address is pushed first */
                ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
        else if (priv->eth_addr.broadcast_enabled)
                ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);

        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
                if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
                        continue;
                if (i >= size)
                        break;
                ether_addr_copy(addr_array[i++], hn->ai.addr);
        }
}

static void
mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
    int list_type)
{
        bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
        struct mlx5e_eth_addr_hash_node *hn;
        u8 (*addr_array)[ETH_ALEN] = NULL;
        struct mlx5e_eth_addr_hash_head *addr_list;
        struct mlx5e_eth_addr_hash_node *tmp;
        int max_size;
        int size;
        int err;
        int hi;

        size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
        max_size = is_uc ?
            1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
            1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

        addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
        mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
                size++;

        if (size > max_size) {
                if_printf(priv->ifp,
                    "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
                    is_uc ? "UC" : "MC", size, max_size);
                size = max_size;
        }

        if (size) {
                addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
                if (!addr_array) {
                        err = -ENOMEM;
                        goto out;
                }
                mlx5e_fill_addr_array(priv, list_type, addr_array, size);
        }

        err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
        if (err)
                if_printf(priv->ifp,
                    "Failed to modify vport %s list err(%d)\n",
                    is_uc ? "UC" : "MC", err);
        kfree(addr_array);
}

static void
mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
        struct mlx5e_eth_addr_db *ea = &priv->eth_addr;

        mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
        mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
        mlx5_modify_nic_vport_promisc(priv->mdev, 0,
                                      ea->allmulti_enabled,
                                      ea->promisc_enabled);
}

static void
mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_eth_addr_hash_node *hn;
        struct mlx5e_eth_addr_hash_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
            mlx5e_execute_action(priv, hn);

        mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
            mlx5e_execute_action(priv, hn);
}

static void
mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
{
        struct mlx5e_eth_addr_hash_node *hn;
        struct mlx5e_eth_addr_hash_node *tmp;
        int i;

        mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
            hn->action = MLX5E_ACTION_DEL;
        mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
            hn->action = MLX5E_ACTION_DEL;

        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_sync_ifp_addr(priv);

        mlx5e_apply_ifp_addr(priv);
}

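/*
 * Recompute the receive filter state.  Each feature (promiscuous,
 * allmulti, broadcast) is compared against its previously recorded
 * state and only the deltas are applied; rule additions happen before
 * deletions, presumably to avoid a window with no matching rule while
 * the mode changes.
 */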
void
mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
{
        struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
        struct ifnet *ndev = priv->ifp;

        bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
        bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
        bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
        bool broadcast_enabled = rx_mode_enable;

        bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
        bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
        bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
        bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
        bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
        bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;

        /* update broadcast address */
        ether_addr_copy(priv->eth_addr.broadcast.addr,
            priv->ifp->if_broadcastaddr);

        if (enable_promisc) {
                mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
                if (!priv->vlan.filter_disabled)
                        mlx5e_add_any_vid_rules(priv);
        }
        if (enable_allmulti)
                mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
        if (enable_broadcast)
                mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

        mlx5e_handle_ifp_addr(priv);

        if (disable_broadcast)
                mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
        if (disable_allmulti)
                mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
        if (disable_promisc) {
                if (!priv->vlan.filter_disabled)
                        mlx5e_del_any_vid_rules(priv);
                mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
        }

        ea->promisc_enabled = promisc_enabled;
        ea->allmulti_enabled = allmulti_enabled;
        ea->broadcast_enabled = broadcast_enabled;

        mlx5e_vport_context_update(priv);
}

void
mlx5e_set_rx_mode_work(struct work_struct *work)
{
        struct mlx5e_priv *priv =
            container_of(work, struct mlx5e_priv, set_rx_mode_work);

        PRIV_LOCK(priv);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_set_rx_mode_core(priv);
        PRIV_UNLOCK(priv);
}

static void
mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
        int i;

        for (i = ft->num_groups - 1; i >= 0; i--) {
                if (!IS_ERR_OR_NULL(ft->g[i]))
                        mlx5_destroy_flow_group(ft->g[i]);
                ft->g[i] = NULL;
        }
        ft->num_groups = 0;
}

static void
mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
        mlx5e_destroy_groups(ft);
        kfree(ft->g);
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;
}

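/*
 * Flow group sizes for the main table.  Each group holds rules with
 * identical match criteria; the power-of-two sizes below must line up
 * with the order the groups are created in
 * mlx5e_create_main_groups_sub().  Groups 0-3 cover rules without a
 * DMAC match, groups 4-6 full 48-bit DMAC matches, and groups 7-9 the
 * multicast-bit (allmulti) matches.
 */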
#define MLX5E_NUM_MAIN_GROUPS   10
#define MLX5E_MAIN_GROUP0_SIZE  BIT(4)
#define MLX5E_MAIN_GROUP1_SIZE  BIT(3)
#define MLX5E_MAIN_GROUP2_SIZE  BIT(1)
#define MLX5E_MAIN_GROUP3_SIZE  BIT(0)
#define MLX5E_MAIN_GROUP4_SIZE  BIT(14)
#define MLX5E_MAIN_GROUP5_SIZE  BIT(13)
#define MLX5E_MAIN_GROUP6_SIZE  BIT(11)
#define MLX5E_MAIN_GROUP7_SIZE  BIT(2)
#define MLX5E_MAIN_GROUP8_SIZE  BIT(1)
#define MLX5E_MAIN_GROUP9_SIZE  BIT(0)
#define MLX5E_MAIN_TABLE_SIZE   (MLX5E_MAIN_GROUP0_SIZE +\
                                 MLX5E_MAIN_GROUP1_SIZE +\
                                 MLX5E_MAIN_GROUP2_SIZE +\
                                 MLX5E_MAIN_GROUP3_SIZE +\
                                 MLX5E_MAIN_GROUP4_SIZE +\
                                 MLX5E_MAIN_GROUP5_SIZE +\
                                 MLX5E_MAIN_GROUP6_SIZE +\
                                 MLX5E_MAIN_GROUP7_SIZE +\
                                 MLX5E_MAIN_GROUP8_SIZE +\
                                 MLX5E_MAIN_GROUP9_SIZE +\
                                 0)

static int
mlx5e_create_main_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
        u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
                                match_criteria.outer_headers.dmac_47_16);
        int err;
        int ix = 0;

        /* Tunnel rules need to be first in this list of groups */

        /* Start tunnel rules: group 0, ethertype + IP protocol + UDP dport */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP0_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;
        /* End tunnel rules */

        /* Group 1: ethertype + IP protocol, no DMAC */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 2: ethertype only */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 3: catch-all, no match criteria */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP3_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 4: full DMAC + ethertype + IP protocol */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        memset(dmac, 0xff, ETH_ALEN);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP4_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 5: full DMAC + ethertype */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        memset(dmac, 0xff, ETH_ALEN);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP5_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 6: full DMAC only */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        memset(dmac, 0xff, ETH_ALEN);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP6_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 7: multicast bit + ethertype + IP protocol */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
        dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP7_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 8: multicast bit + ethertype */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
        dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP8_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 9: multicast bit only */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        dmac[0] = 0x01;
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_MAIN_GROUP9_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        return (0);

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);

        return (err);
}

static int
mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
{
        u32 *in;
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return (-ENOMEM);

        err = mlx5e_create_main_groups_sub(ft, in, inlen);

        kvfree(in);
        return (err);
}

static int
mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
{
        struct mlx5e_flow_table *ft = &priv->fts.main;
        int err;

        ft->num_groups = 0;
        ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "main",
                                       MLX5E_MAIN_TABLE_SIZE);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return (err);
        }
        ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
                err = -ENOMEM;
                goto err_destroy_main_flow_table;
        }

        err = mlx5e_create_main_groups(ft);
        if (err)
                goto err_free_g;
        return (0);

err_free_g:
        kfree(ft->g);

err_destroy_main_flow_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return (err);
}

static void
mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_flow_table(&priv->fts.main);
}

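/*
 * VLAN table layout: group 0 (BIT(12) = 4096 entries) holds the
 * per-VID C-tag rules, group 1 (two entries) the any-C-tag and
 * untagged rules, which both match on cvlan_tag only, and group 2 the
 * any-S-tag rule.
 */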
#define MLX5E_NUM_VLAN_GROUPS   3
#define MLX5E_VLAN_GROUP0_SIZE  BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE  BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE  BIT(0)
#define MLX5E_VLAN_TABLE_SIZE   (MLX5E_VLAN_GROUP0_SIZE +\
                                 MLX5E_VLAN_GROUP1_SIZE +\
                                 MLX5E_VLAN_GROUP2_SIZE +\
                                 0)

static int
mlx5e_create_vlan_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
        int err;
        int ix = 0;
        u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

        /* Group 0: per-VID C-tag matches */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP0_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 1: C-tag presence only (any-C-tag and untagged rules) */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 2: S-tag presence only */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        return (0);

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);

        return (err);
}

static int
mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
{
        u32 *in;
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return (-ENOMEM);

        err = mlx5e_create_vlan_groups_sub(ft, in, inlen);

        kvfree(in);
        return (err);
}

static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
        struct mlx5e_flow_table *ft = &priv->fts.vlan;
        int err;

        ft->num_groups = 0;
        ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
                                       MLX5E_VLAN_TABLE_SIZE);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return (err);
        }
        ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
        if (!ft->g) {
                err = -ENOMEM;
                goto err_destroy_vlan_flow_table;
        }

        err = mlx5e_create_vlan_groups(ft);
        if (err)
                goto err_free_g;

        return (0);

err_free_g:
        kfree(ft->g);

err_destroy_vlan_flow_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return (err);
}

static void
mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_flow_table(&priv->fts.vlan);
}

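/*
 * The inner RSS table mirrors the ethertype/ip_protocol groups of the
 * main table but matches on the inner (encapsulated) headers, allowing
 * RSS over the inner flow of tunneled packets.
 */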
#define MLX5E_NUM_INNER_RSS_GROUPS      3
#define MLX5E_INNER_RSS_GROUP0_SIZE     BIT(3)
#define MLX5E_INNER_RSS_GROUP1_SIZE     BIT(1)
#define MLX5E_INNER_RSS_GROUP2_SIZE     BIT(0)
#define MLX5E_INNER_RSS_TABLE_SIZE      (MLX5E_INNER_RSS_GROUP0_SIZE +\
                                         MLX5E_INNER_RSS_GROUP1_SIZE +\
                                         MLX5E_INNER_RSS_GROUP2_SIZE +\
                                         0)

static int
mlx5e_create_inner_rss_groups_sub(struct mlx5e_flow_table *ft, u32 *in,
    int inlen)
{
        u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
        int err;
        int ix = 0;

        /* Group 0: inner ethertype + inner IP protocol */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
        MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_INNER_RSS_GROUP0_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 1: inner ethertype only */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
        MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_INNER_RSS_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        /* Group 2: catch-all, no match criteria */
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_INNER_RSS_GROUP2_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
                goto err_destroy_groups;
        ft->num_groups++;

        return (0);

err_destroy_groups:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);

        return (err);
}

static int
mlx5e_create_inner_rss_groups(struct mlx5e_flow_table *ft)
{
        u32 *in;
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return (-ENOMEM);

        err = mlx5e_create_inner_rss_groups_sub(ft, in, inlen);

        kvfree(in);
        return (err);
}

static int
mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
{
        struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
        int err;

        ft->num_groups = 0;
        ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
                                       MLX5E_INNER_RSS_TABLE_SIZE);

        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
                return (err);
        }
        ft->g = kcalloc(MLX5E_NUM_INNER_RSS_GROUPS, sizeof(*ft->g),
                        GFP_KERNEL);
        if (!ft->g) {
                err = -ENOMEM;
                goto err_destroy_inner_rss_flow_table;
        }

        err = mlx5e_create_inner_rss_groups(ft);
        if (err)
                goto err_free_g;

        return (0);

err_free_g:
        kfree(ft->g);

err_destroy_inner_rss_flow_table:
        mlx5_destroy_flow_table(ft->t);
        ft->t = NULL;

        return (err);
}

static void
mlx5e_destroy_inner_rss_flow_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_flow_table(&priv->fts.inner_rss);
}

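/*
 * Create the flow tables in dependency order (the vlan table forwards
 * to the main table) and tear them down in reverse in
 * mlx5e_close_flow_table().
 */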
int
mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
        int err;

        priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
                                               MLX5_FLOW_NAMESPACE_KERNEL);

        err = mlx5e_create_vlan_flow_table(priv);
        if (err)
                return (err);

        err = mlx5e_create_main_flow_table(priv);
        if (err)
                goto err_destroy_vlan_flow_table;

        err = mlx5e_create_inner_rss_flow_table(priv);
        if (err)
                goto err_destroy_main_flow_table;

        return (0);

err_destroy_main_flow_table:
        mlx5e_destroy_main_flow_table(priv);
err_destroy_vlan_flow_table:
        mlx5e_destroy_vlan_flow_table(priv);

        return (err);
}

void
mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
        mlx5e_destroy_inner_rss_flow_table(priv);
        mlx5e_destroy_main_flow_table(priv);
        mlx5e_destroy_vlan_flow_table(priv);
}