/*
 * sys/dev/mlx5/mlx5_en/mlx5_en_main.c (FreeBSD) — MFC r332003
 */
1 /*-
2  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27
28 #include "en.h"
29
30 #include <sys/sockio.h>
31 #include <machine/atomic.h>
32
/* Fallback driver version used when the build does not provide one. */
#ifndef ETH_DRIVER_VERSION
#define ETH_DRIVER_VERSION      "3.4.1"
#endif
/* Human-readable version string exported by the driver. */
char mlx5e_version[] = "Mellanox Ethernet driver"
    " (" ETH_DRIVER_VERSION ")";

/*
 * Parameters used when creating one channel: its receive queue, send
 * queue and the corresponding RX/TX completion queues.
 */
struct mlx5e_channel_param {
	struct mlx5e_rq_param rq;
	struct mlx5e_sq_param sq;
	struct mlx5e_cq_param rx_cq;
	struct mlx5e_cq_param tx_cq;
};
45
/*
 * Translation table from the firmware link-mode bit index (the
 * MLX5E_* designated initializer indices below) to the matching
 * ifmedia subtype and baudrate.  Indices without an entry default to
 * a zero baudrate, which the lookup loops in this file treat as
 * "unsupported" and skip.
 */
static const struct {
	u32	subtype;
	u64	baudrate;
}	mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = {

	[MLX5E_1000BASE_CX_SGMII] = {
		.subtype = IFM_1000_CX_SGMII,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_1000BASE_KX] = {
		.subtype = IFM_1000_KX,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_CX4] = {
		.subtype = IFM_10G_CX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KX4] = {
		.subtype = IFM_10G_KX4,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_KR] = {
		.subtype = IFM_10G_KR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_20GBASE_KR2] = {
		.subtype = IFM_20G_KR2,
		.baudrate = IF_Gbps(20ULL),
	},
	[MLX5E_40GBASE_CR4] = {
		.subtype = IFM_40G_CR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_KR4] = {
		.subtype = IFM_40G_KR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_56GBASE_R4] = {
		.subtype = IFM_56G_R4,
		.baudrate = IF_Gbps(56ULL),
	},
	[MLX5E_10GBASE_CR] = {
		.subtype = IFM_10G_CR1,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_SR] = {
		.subtype = IFM_10G_SR,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_10GBASE_ER] = {
		.subtype = IFM_10G_ER,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_40GBASE_SR4] = {
		.subtype = IFM_40G_SR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_40GBASE_LR4] = {
		.subtype = IFM_40G_LR4,
		.baudrate = IF_Gbps(40ULL),
	},
	[MLX5E_100GBASE_CR4] = {
		.subtype = IFM_100G_CR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_SR4] = {
		.subtype = IFM_100G_SR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_KR4] = {
		.subtype = IFM_100G_KR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100GBASE_LR4] = {
		.subtype = IFM_100G_LR4,
		.baudrate = IF_Gbps(100ULL),
	},
	[MLX5E_100BASE_TX] = {
		.subtype = IFM_100_TX,
		.baudrate = IF_Mbps(100ULL),
	},
	[MLX5E_1000BASE_T] = {
		.subtype = IFM_1000_T,
		.baudrate = IF_Mbps(1000ULL),
	},
	[MLX5E_10GBASE_T] = {
		.subtype = IFM_10G_T,
		.baudrate = IF_Gbps(10ULL),
	},
	[MLX5E_25GBASE_CR] = {
		.subtype = IFM_25G_CR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_KR] = {
		.subtype = IFM_25G_KR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_25GBASE_SR] = {
		.subtype = IFM_25G_SR,
		.baudrate = IF_Gbps(25ULL),
	},
	[MLX5E_50GBASE_CR2] = {
		.subtype = IFM_50G_CR2,
		.baudrate = IF_Gbps(50ULL),
	},
	[MLX5E_50GBASE_KR2] = {
		.subtype = IFM_50G_KR2,
		.baudrate = IF_Gbps(50ULL),
	},
};
156
157 MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
158
/*
 * Query the current link state from firmware and propagate it to the
 * network interface.  The vport state is checked first; when the port
 * is down the carrier is dropped immediately.  When the port is up,
 * the operational protocol bits from the PTYS register are translated
 * into an ifmedia subtype and baudrate via "mlx5e_mode_table".
 *
 * NOTE(review): callers visible in this file invoke this under
 * PRIV_LOCK() — presumably the configuration lock must be held.
 */
static void
mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
	u32 eth_proto_oper;
	int error;
	u8 port_state;
	u8 i;

	port_state = mlx5_query_vport_state(mdev,
	    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		priv->media_status_last |= IFM_ACTIVE;
	} else {
		/* link is down: clear active media and report it */
		priv->media_status_last &= ~IFM_ACTIVE;
		priv->media_active_last = IFM_ETHER;
		if_link_state_change(priv->ifp, LINK_STATE_DOWN);
		return;
	}

	/* read the operational link modes from the PTYS register */
	error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
	if (error) {
		/* fall back to a neutral media type and token baudrate */
		priv->media_active_last = IFM_ETHER;
		priv->ifp->if_baudrate = 1;
		if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n",
		    __func__, error);
		return;
	}
	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);

	/*
	 * Map the operational protocol bits to ifmedia; if several bits
	 * are set, the highest-indexed matching entry wins (last write).
	 */
	for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
		if (mlx5e_mode_table[i].baudrate == 0)
			continue;	/* unsupported table slot */
		if (MLX5E_PROT_MASK(i) & eth_proto_oper) {
			priv->ifp->if_baudrate =
			    mlx5e_mode_table[i].baudrate;
			priv->media_active_last =
			    mlx5e_mode_table[i].subtype | IFM_ETHER | IFM_FDX;
		}
	}
	if_link_state_change(priv->ifp, LINK_STATE_UP);
}
203
204 static void
205 mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
206 {
207         struct mlx5e_priv *priv = dev->if_softc;
208
209         ifmr->ifm_status = priv->media_status_last;
210         ifmr->ifm_active = priv->media_active_last |
211             (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
212             (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
213
214 }
215
216 static u32
217 mlx5e_find_link_mode(u32 subtype)
218 {
219         u32 i;
220         u32 link_mode = 0;
221
222         for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
223                 if (mlx5e_mode_table[i].baudrate == 0)
224                         continue;
225                 if (mlx5e_mode_table[i].subtype == subtype)
226                         link_mode |= MLX5E_PROT_MASK(i);
227         }
228
229         return (link_mode);
230 }
231
232 static int
233 mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv)
234 {
235         return (mlx5_set_port_pause_and_pfc(priv->mdev, 1,
236             priv->params.rx_pauseframe_control,
237             priv->params.tx_pauseframe_control,
238             priv->params.rx_priority_flow_control,
239             priv->params.tx_priority_flow_control));
240 }
241
242 static int
243 mlx5e_set_port_pfc(struct mlx5e_priv *priv)
244 {
245         int error;
246
247         if (priv->params.rx_pauseframe_control ||
248             priv->params.tx_pauseframe_control) {
249                 if_printf(priv->ifp,
250                     "Global pauseframes must be disabled before enabling PFC.\n");
251                 error = -EINVAL;
252         } else {
253                 error = mlx5e_set_port_pause_and_pfc(priv);
254         }
255         return (error);
256 }
257
/*
 * ifmedia change callback: reprogram the port's link mode and global
 * pauseframe settings from "priv->media".  The port is taken down,
 * reconfigured, and brought back up only if the interface was open.
 *
 * May be called with the configuration lock already held; the lock is
 * taken here only when not already owned (PRIV_LOCKED check).
 * Returns 0 on success or a positive errno-style value.
 */
static int
mlx5e_media_change(struct ifnet *dev)
{
	struct mlx5e_priv *priv = dev->if_softc;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 eth_proto_cap;
	u32 link_mode;
	int was_opened;
	int locked;
	int error;

	locked = PRIV_LOCKED(priv);
	if (!locked)
		PRIV_LOCK(priv);

	if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
		error = EINVAL;
		goto done;
	}
	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));

	/* query supported capabilities */
	error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
	if (error != 0) {
		if_printf(dev, "Query port media capability failed\n");
		goto done;
	}
	/* check for autoselect: advertise everything the port supports */
	if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
		link_mode = eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Port media capability is zero\n");
			error = EINVAL;
			goto done;
		}
	} else {
		/* restrict the requested mode to the port capabilities */
		link_mode = link_mode & eth_proto_cap;
		if (link_mode == 0) {
			if_printf(dev, "Not supported link mode requested\n");
			error = EINVAL;
			goto done;
		}
	}
	if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
		/* global pauseframes and PFC are mutually exclusive */
		if (priv->params.rx_priority_flow_control ||
		    priv->params.tx_priority_flow_control) {
			if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n");
			error = EINVAL;
			goto done;
		}
	}
	/* update pauseframe control bits */
	priv->params.rx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
	priv->params.tx_pauseframe_control =
	    (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;

	/* check if device is opened */
	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

	/* reconfigure the hardware: down, set proto and pause, then up */
	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
	error = -mlx5e_set_port_pause_and_pfc(priv);
	if (was_opened)
		mlx5_set_port_status(mdev, MLX5_PORT_UP);

done:
	if (!locked)
		PRIV_UNLOCK(priv);
	return (error);
}
331
332 static void
333 mlx5e_update_carrier_work(struct work_struct *work)
334 {
335         struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
336             update_carrier_work);
337
338         PRIV_LOCK(priv);
339         if (test_bit(MLX5E_STATE_OPENED, &priv->state))
340                 mlx5e_update_carrier(priv);
341         PRIV_UNLOCK(priv);
342 }
343
/*
 * This function reads the physical port counters from the firmware
 * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
 * macros. The output is converted from big-endian 64-bit values into
 * host endian ones and stored in the "priv->stats.pport" structure.
 *
 * "x" indexes the firmware counter set, "y" indexes the destination
 * statistics array; the two deliberately diverge because the
 * destination layout packs several firmware groups back to back.
 */
static void
mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *s = &priv->stats.pport;
	struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
	u32 *in;
	u32 *out;
	const u64 *ptr;
	unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	unsigned x;
	unsigned y;
	unsigned z;

	/* allocate firmware request structures */
	in = mlx5_vzalloc(sz);
	out = mlx5_vzalloc(sz);
	if (in == NULL || out == NULL)
		goto free_out;

	/*
	 * Get pointer to the 64-bit counter set which is located at a
	 * fixed offset in the output firmware request structure:
	 */
	ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	/* read IEEE802_3 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	/* "y" starts past the per-priority slots at the front of s->arg[] */
	for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM;
	     x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);

	/* read RFC2819 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
		s->arg[y] = be64toh(ptr[x]);
	/* remaining RFC2819 counters land at the start of the debug stats */
	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read RFC2863 counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read physical layer stats counter group using predefined counter layout */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
		s_debug->arg[y] = be64toh(ptr[x]);

	/* read per-priority counters */
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);

	/* iterate all the priorities; one firmware access per priority */
	for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) {
		MLX5_SET(ppcnt_reg, in, prio_tc, z);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

		/* read per priority stats counter group using predefined counter layout */
		for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM /
		    MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++)
			s->arg[y] = be64toh(ptr[x]);
	}
free_out:
	/* free firmware request structures */
	kvfree(in);
	kvfree(out);
}
424
/*
 * This function is called regularly to collect all statistics
 * counters from the firmware. The values can be viewed through the
 * sysctl interface. Execution is serialized using the priv's global
 * configuration lock.
 */
static void
mlx5e_update_stats_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
	    update_stats_work);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	struct buf_ring *sq_br;
#if (__FreeBSD_version < 1100000)
	struct ifnet *ifp = priv->ifp;
#endif

	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tso_packets = 0;
	u64 tso_bytes = 0;
	u64 tx_queue_dropped = 0;
	u64 tx_defragged = 0;
	u64 tx_offload_none = 0;
	u64 lro_packets = 0;
	u64 lro_bytes = 0;
	u64 sw_lro_queued = 0;
	u64 sw_lro_flushed = 0;
	u64 rx_csum_none = 0;
	u64 rx_wqe_err = 0;
	u32 rx_out_of_buffer = 0;
	int i;
	int j;

	PRIV_LOCK(priv);
	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		goto free_out;
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		goto free_out;

	/* Collect first the SW counters and then HW for consistency */
	for (i = 0; i < priv->params.num_channels; i++) {
		struct mlx5e_rq *rq = &priv->channel[i]->rq;

		rq_stats = &priv->channel[i]->rq.stats;

		/* collect stats from LRO */
		rq_stats->sw_lro_queued = rq->lro.lro_queued;
		rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
		sw_lro_queued += rq_stats->sw_lro_queued;
		sw_lro_flushed += rq_stats->sw_lro_flushed;
		lro_packets += rq_stats->lro_packets;
		lro_bytes += rq_stats->lro_bytes;
		rx_csum_none += rq_stats->csum_none;
		rx_wqe_err += rq_stats->wqe_err;

		/* sum the per-TC send queue counters of this channel */
		for (j = 0; j < priv->num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;
			sq_br = priv->channel[i]->sq[j].br;

			tso_packets += sq_stats->tso_packets;
			tso_bytes += sq_stats->tso_bytes;
			tx_queue_dropped += sq_stats->dropped;
			if (sq_br != NULL)
				tx_queue_dropped += sq_br->br_drops;
			tx_defragged += sq_stats->defragged;
			tx_offload_none += sq_stats->csum_offload_none;
		}
	}

	/* update counters */
	s->tso_packets = tso_packets;
	s->tso_bytes = tso_bytes;
	s->tx_queue_dropped = tx_queue_dropped;
	s->tx_defragged = tx_defragged;
	s->lro_packets = lro_packets;
	s->lro_bytes = lro_bytes;
	s->sw_lro_queued = sw_lro_queued;
	s->sw_lro_flushed = sw_lro_flushed;
	s->rx_csum_none = rx_csum_none;
	s->rx_wqe_err = rx_wqe_err;

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	/* get number of out-of-buffer drops first */
	if (mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
	    &rx_out_of_buffer))
		goto free_out;

	/*
	 * The firmware counter is 32 bits wide; accumulate the modulo-2^32
	 * delta since the last read into a 64-bit software counter so it
	 * survives counter wraparound.
	 */
	s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev);
	s->rx_out_of_buffer_prev = rx_out_of_buffer;

	/* get port statistics */
	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
		goto free_out;

/* shorthand for fetching a 64-bit field from the query output */
#define MLX5_GET_CTR(out, x) \
	MLX5_GET64(query_vport_counter_out, out, x)

	s->rx_error_packets =
	    MLX5_GET_CTR(out, received_errors.packets);
	s->rx_error_bytes =
	    MLX5_GET_CTR(out, received_errors.octets);
	s->tx_error_packets =
	    MLX5_GET_CTR(out, transmit_errors.packets);
	s->tx_error_bytes =
	    MLX5_GET_CTR(out, transmit_errors.octets);

	s->rx_unicast_packets =
	    MLX5_GET_CTR(out, received_eth_unicast.packets);
	s->rx_unicast_bytes =
	    MLX5_GET_CTR(out, received_eth_unicast.octets);
	s->tx_unicast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
	s->tx_unicast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

	s->rx_multicast_packets =
	    MLX5_GET_CTR(out, received_eth_multicast.packets);
	s->rx_multicast_bytes =
	    MLX5_GET_CTR(out, received_eth_multicast.octets);
	s->tx_multicast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
	s->tx_multicast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

	s->rx_broadcast_packets =
	    MLX5_GET_CTR(out, received_eth_broadcast.packets);
	s->rx_broadcast_bytes =
	    MLX5_GET_CTR(out, received_eth_broadcast.octets);
	s->tx_broadcast_packets =
	    MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
	s->tx_broadcast_bytes =
	    MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	/* dropped frames are excluded from the received packet total */
	s->rx_packets =
	    s->rx_unicast_packets +
	    s->rx_multicast_packets +
	    s->rx_broadcast_packets -
	    s->rx_out_of_buffer;
	s->rx_bytes =
	    s->rx_unicast_bytes +
	    s->rx_multicast_bytes +
	    s->rx_broadcast_bytes;
	s->tx_packets =
	    s->tx_unicast_packets +
	    s->tx_multicast_packets +
	    s->tx_broadcast_packets;
	s->tx_bytes =
	    s->tx_unicast_bytes +
	    s->tx_multicast_bytes +
	    s->tx_broadcast_bytes;

	/* Update calculated offload counters */
	s->tx_csum_offload = s->tx_packets - tx_offload_none;
	s->rx_csum_good = s->rx_packets - s->rx_csum_none;

	/* Get physical port counters */
	mlx5e_update_pport_counters(priv);

#if (__FreeBSD_version < 1100000)
	/* no get_counters interface in fbsd 10 */
	ifp->if_ipackets = s->rx_packets;
	ifp->if_ierrors = s->rx_error_packets +
	    priv->stats.pport.alignment_err +
	    priv->stats.pport.check_seq_err +
	    priv->stats.pport.crc_align_errors +
	    priv->stats.pport.in_range_len_errors +
	    priv->stats.pport.jabbers +
	    priv->stats.pport.out_of_range_len +
	    priv->stats.pport.oversize_pkts +
	    priv->stats.pport.symbol_err +
	    priv->stats.pport.too_long_errors +
	    priv->stats.pport.undersize_pkts +
	    priv->stats.pport.unsupported_op_rx;
	ifp->if_iqdrops = s->rx_out_of_buffer +
	    priv->stats.pport.drop_events;
	ifp->if_opackets = s->tx_packets;
	ifp->if_oerrors = s->tx_error_packets;
	ifp->if_snd.ifq_drops = s->tx_queue_dropped;
	ifp->if_ibytes = s->rx_bytes;
	ifp->if_obytes = s->tx_bytes;
	ifp->if_collisions =
	    priv->stats.pport.collisions;
#endif

free_out:
	kvfree(out);

	/* Update diagnostics, if any */
	if (priv->params_ethtool.diag_pci_enable ||
	    priv->params_ethtool.diag_general_enable) {
		int error = mlx5_core_get_diagnostics_full(mdev,
		    priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
		    priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
		if (error != 0)
			if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
	}
	PRIV_UNLOCK(priv);
}
639
/*
 * Watchdog callout handler: defer the heavy statistics collection to
 * the driver workqueue, then re-arm itself to fire again in one
 * second (hz ticks).
 */
static void
mlx5e_update_stats(void *arg)
{
	struct mlx5e_priv *priv = arg;

	queue_work(priv->wq, &priv->update_stats_work);

	callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
}
649
650 static void
651 mlx5e_async_event_sub(struct mlx5e_priv *priv,
652     enum mlx5_dev_event event)
653 {
654         switch (event) {
655         case MLX5_DEV_EVENT_PORT_UP:
656         case MLX5_DEV_EVENT_PORT_DOWN:
657                 queue_work(priv->wq, &priv->update_carrier_work);
658                 break;
659
660         default:
661                 break;
662         }
663 }
664
/*
 * Asynchronous event callback registered with the core driver.
 * Events are dispatched under "async_events_mtx" and are dropped
 * entirely while the MLX5E_STATE_ASYNC_EVENTS_ENABLE bit is cleared
 * (see mlx5e_disable_async_events()).
 */
static void
mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
    enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	mtx_lock(&priv->async_events_mtx);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		mlx5e_async_event_sub(priv, event);
	mtx_unlock(&priv->async_events_mtx);
}
676
/*
 * Allow asynchronous firmware events to be processed.  No mutex is
 * taken here — presumably set_bit() is atomic (Linux compat layer)
 * and the lock is only needed on the disable path to drain a
 * concurrent handler; see mlx5e_disable_async_events().
 */
static void
mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}
682
/*
 * Block asynchronous firmware events.  Clearing the bit while holding
 * "async_events_mtx" guarantees that no event handler is still inside
 * mlx5e_async_event_sub() once this function returns, because
 * mlx5e_async_event() dispatches under the same mutex.
 */
static void
mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	mtx_lock(&priv->async_events_mtx);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	mtx_unlock(&priv->async_events_mtx);
}
690
/* sysctl description strings for the per-RQ statistics counters */
static const char *mlx5e_rq_stats_desc[] = {
	MLX5E_RQ_STATS(MLX5E_STATS_DESC)
};
694
/*
 * Allocate and initialize the software state of one receive queue:
 * DMA tag, firmware work queue, LRO context, per-WQE mbuf/DMA-map
 * array and the per-queue sysctl statistics node.
 *
 * Returns zero on success or a negative error code; on failure every
 * resource allocated so far is released via the goto-cleanup ladder
 * at the bottom.
 */
static int
mlx5e_create_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM16BYTES,		/* maxsize */
	    1,				/* nsegments */
	    MJUM16BYTES,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &rq->dma_tag)))
		goto done;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
	    &rq->wq_ctrl);
	if (err)
		goto err_free_dma_tag;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	/* choose receive buffer size: LRO buffer or MTU-derived size */
	if (priv->params.hw_lro_en) {
		rq->wqe_sz = priv->params.lro_wqe_sz;
	} else {
		rq->wqe_sz = MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
	}
	/* round the WQE size up to the next supported mbuf cluster size */
	if (rq->wqe_sz > MJUM16BYTES) {
		err = -ENOMEM;
		goto err_rq_wq_destroy;
	} else if (rq->wqe_sz > MJUM9BYTES) {
		rq->wqe_sz = MJUM16BYTES;
	} else if (rq->wqe_sz > MJUMPAGESIZE) {
		rq->wqe_sz = MJUM9BYTES;
	} else if (rq->wqe_sz > MCLBYTES) {
		rq->wqe_sz = MJUMPAGESIZE;
	} else {
		rq->wqe_sz = MCLBYTES;
	}

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
	if (err)
		goto err_rq_wq_destroy;

	/* one mbuf slot and DMA map per receive WQE */
	rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
	for (i = 0; i != wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
		uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

		err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
		if (err != 0) {
			/* tear down the DMA maps created so far */
			while (i--)
				bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
			goto err_rq_mbuf_free;
		}
		wqe->data.lkey = c->mkey_be;
		wqe->data.byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
	}

	rq->ifp = c->ifp;
	rq->channel = c;
	rq->ix = c->ix;

	/* attach per-RQ statistics under a "rxstat<N>" sysctl node */
	snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
	mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
	    rq->stats.arg);
	return (0);

err_rq_mbuf_free:
	free(rq->mbuf, M_MLX5EN);
	tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
	bus_dma_tag_destroy(rq->dma_tag);
done:
	return (err);
}
791
792 static void
793 mlx5e_destroy_rq(struct mlx5e_rq *rq)
794 {
795         int wq_sz;
796         int i;
797
798         /* destroy all sysctl nodes */
799         sysctl_ctx_free(&rq->stats.ctx);
800
801         /* free leftover LRO packets, if any */
802         tcp_lro_free(&rq->lro);
803
804         wq_sz = mlx5_wq_ll_get_size(&rq->wq);
805         for (i = 0; i != wq_sz; i++) {
806                 if (rq->mbuf[i].mbuf != NULL) {
807                         bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
808                         m_freem(rq->mbuf[i].mbuf);
809                 }
810                 bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
811         }
812         free(rq->mbuf, M_MLX5EN);
813         mlx5_wq_destroy(&rq->wq_ctrl);
814 }
815
/*
 * Issue the CREATE_RQ firmware command for an already-initialized
 * receive queue.  The RQ is created in reset state (MLX5_RQC_STATE_RST);
 * a subsequent modify command changes its state.  Returns the status
 * of the firmware command, or -ENOMEM if the command buffer cannot be
 * allocated.
 */
static int
mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	/* command length includes one 64-bit PAS entry per buffer page */
	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
	    sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq = MLX5_ADDR_OF(rqc, rqc, wq);

	/* start from the RQ context prepared by the channel parameters */
	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	if (priv->counter_set_id >= 0)
		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	/* fill in the physical address list of the WQ buffer */
	mlx5_fill_page_array(&rq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return (err);
}
858
859 static int
860 mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
861 {
862         struct mlx5e_channel *c = rq->channel;
863         struct mlx5e_priv *priv = c->priv;
864         struct mlx5_core_dev *mdev = priv->mdev;
865
866         void *in;
867         void *rqc;
868         int inlen;
869         int err;
870
871         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
872         in = mlx5_vzalloc(inlen);
873         if (in == NULL)
874                 return (-ENOMEM);
875
876         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
877
878         MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
879         MLX5_SET(modify_rq_in, in, rq_state, curr_state);
880         MLX5_SET(rqc, rqc, state, next_state);
881
882         err = mlx5_core_modify_rq(mdev, in, inlen);
883
884         kvfree(in);
885
886         return (err);
887 }
888
889 static void
890 mlx5e_disable_rq(struct mlx5e_rq *rq)
891 {
892         struct mlx5e_channel *c = rq->channel;
893         struct mlx5e_priv *priv = c->priv;
894         struct mlx5_core_dev *mdev = priv->mdev;
895
896         mlx5_core_destroy_rq(mdev, rq->rqn);
897 }
898
899 static int
900 mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
901 {
902         struct mlx5e_channel *c = rq->channel;
903         struct mlx5e_priv *priv = c->priv;
904         struct mlx5_wq_ll *wq = &rq->wq;
905         int i;
906
907         for (i = 0; i < 1000; i++) {
908                 if (wq->cur_sz >= priv->params.min_rx_wqes)
909                         return (0);
910
911                 msleep(4);
912         }
913         return (-ETIMEDOUT);
914 }
915
/*
 * Bring up a receive queue: allocate software state, create the
 * firmware object and move it from reset to ready.  On any failure
 * the steps already taken are unwound in reverse order.
 * Returns 0 or a negative errno.
 */
static int
mlx5e_open_rq(struct mlx5e_channel *c,
    struct mlx5e_rq_param *param,
    struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return (err);

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	/* allow the RX path and watchdog to use this RQ */
	c->rq.enabled = 1;

	return (0);

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return (err);
}
946
/*
 * First phase of RQ teardown: mark the RQ disabled so no new buffers
 * are posted, then move the hardware RQ from ready to error state so
 * outstanding WQEs get flushed.  mlx5e_close_rq_wait() finishes the job.
 */
static void
mlx5e_close_rq(struct mlx5e_rq *rq)
{
	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	/* stop the watchdog under the RQ mutex ... */
	callout_stop(&rq->watchdog);
	mtx_unlock(&rq->mtx);

	/* ... and wait (unlocked) for a callback that may still be running */
	callout_drain(&rq->watchdog);

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}
959
/*
 * Second phase of RQ teardown: poll completions until the hardware has
 * returned every posted receive WQE — or the device has entered
 * internal error, in which case no more completions will arrive —
 * then destroy the firmware object and free the software state.
 */
static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = rq->channel->priv->mdev;

	/* wait till RQ is empty */
	while (!mlx5_wq_ll_is_empty(&rq->wq) &&
	       (mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
		msleep(4);
		/* manually reap completions for this RQ's CQ */
		rq->cq.mcq.comp(&rq->cq.mcq);
	}

	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}
975
976 void
977 mlx5e_free_sq_db(struct mlx5e_sq *sq)
978 {
979         int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
980         int x;
981
982         for (x = 0; x != wq_sz; x++)
983                 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
984         free(sq->mbuf, M_MLX5EN);
985 }
986
987 int
988 mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
989 {
990         int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
991         int err;
992         int x;
993
994         sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
995
996         /* Create DMA descriptor MAPs */
997         for (x = 0; x != wq_sz; x++) {
998                 err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
999                 if (err != 0) {
1000                         while (x--)
1001                                 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
1002                         free(sq->mbuf, M_MLX5EN);
1003                         return (err);
1004                 }
1005         }
1006         return (0);
1007 }
1008
/* sysctl name/description table for the per-SQ statistics counters */
static const char *mlx5e_sq_stats_desc[] = {
	MLX5E_SQ_STATS(MLX5E_STATS_DESC)
};
1012
/*
 * Allocate all software resources of a send queue: DMA tag, UAR
 * mapping, cyclic work queue, per-WQE DMA maps and — unless the
 * transmit buf_ring is disabled — a drbr buffer ring plus a fast
 * taskqueue for deferred transmission.  Also registers the per-SQ
 * statistics sysctl nodes.  All steps are unwound in reverse order
 * on failure.  Returns 0 or a negative errno.
 */
static int
mlx5e_create_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	char buffer[16];

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
#ifdef RSS
	cpuset_t cpu_mask;
	int cpu_id;
#endif
	int err;

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,                          /* any alignment */
	    0,                          /* no boundary */
	    BUS_SPACE_MAXADDR,          /* lowaddr */
	    BUS_SPACE_MAXADDR,          /* highaddr */
	    NULL, NULL,                 /* filter, filterarg */
	    MLX5E_MAX_TX_PAYLOAD_SIZE,  /* maxsize */
	    MLX5E_MAX_TX_MBUF_FRAGS,    /* nsegments */
	    MLX5E_MAX_TX_MBUF_SIZE,     /* maxsegsize */
	    0,                          /* flags */
	    NULL, NULL,                 /* lockfunc, lockfuncarg */
	    &sq->dma_tag)))
		goto done;

	/* map a user access region for ringing doorbells */
	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		goto err_free_dma_tag;

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
	    &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	/* point the doorbell record at the send doorbell slot */
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
	/* half of the BlueFlame register is usable per send */
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	err = mlx5e_alloc_sq_db(sq);
	if (err)
		goto err_sq_wq_destroy;

	sq->mkey_be = c->mkey_be;
	sq->ifp = priv->ifp;
	sq->priv = priv;
	sq->tc = tc;

	/* check if we should allocate a second packet buffer */
	if (priv->params_ethtool.tx_bufring_disable == 0) {
		sq->br = buf_ring_alloc(MLX5E_SQ_TX_QUEUE_SIZE, M_MLX5EN,
		    M_WAITOK, &sq->lock);
		if (sq->br == NULL) {
			if_printf(c->ifp, "%s: Failed allocating sq drbr buffer\n",
			    __func__);
			err = -ENOMEM;
			goto err_free_sq_db;
		}

		sq->sq_tq = taskqueue_create_fast("mlx5e_que", M_WAITOK,
		    taskqueue_thread_enqueue, &sq->sq_tq);
		if (sq->sq_tq == NULL) {
			if_printf(c->ifp, "%s: Failed allocating taskqueue\n",
			    __func__);
			err = -ENOMEM;
			goto err_free_drbr;
		}

		TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
#ifdef RSS
		/* pin the taskqueue thread to this channel's RSS bucket CPU */
		cpu_id = rss_getcpu(c->ix % rss_getnumbuckets());
		CPU_SETOF(cpu_id, &cpu_mask);
		taskqueue_start_threads_cpuset(&sq->sq_tq, 1, PI_NET, &cpu_mask,
		    "%s TX SQ%d.%d CPU%d", c->ifp->if_xname, c->ix, tc, cpu_id);
#else
		taskqueue_start_threads(&sq->sq_tq, 1, PI_NET,
		    "%s TX SQ%d.%d", c->ifp->if_xname, c->ix, tc);
#endif
	}
	/* register per-SQ statistics under "txstat<ix>tc<tc>" */
	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
	    buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
	    sq->stats.arg);

	return (0);

err_free_drbr:
	buf_ring_free(sq->br, M_MLX5EN);
err_free_sq_db:
	mlx5e_free_sq_db(sq);
err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

err_free_dma_tag:
	bus_dma_tag_destroy(sq->dma_tag);
done:
	return (err);
}
1121
/*
 * Free all software resources of an SQ created by mlx5e_create_sq().
 * The SQ is expected to be drained and its firmware object destroyed
 * before this is called (see mlx5e_close_sq_wait()).
 */
static void
mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	/* destroy all sysctl nodes */
	sysctl_ctx_free(&sq->stats.ctx);

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
	/* taskqueue/buf_ring only exist when tx_bufring_disable was 0 */
	if (sq->sq_tq != NULL) {
		taskqueue_drain(sq->sq_tq, &sq->sq_task);
		taskqueue_free(sq->sq_tq);
	}
	if (sq->br != NULL)
		buf_ring_free(sq->br, M_MLX5EN);
}
1138
/*
 * Create the hardware SQ object through a firmware CREATE_SQ command,
 * binding it to "tis_num" and to the SQ's completion queue.  The
 * command carries the SQ context from "param" plus the page list of
 * the work queue buffer.  On success the SQ number is stored in
 * sq->sqn.  Returns 0 or a negative errno.
 */
int
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
    int tis_num)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	/* command length includes the variable-size page array (PAS) */
	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
	    sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, tis_num);
	MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
	/* the SQ is created in reset state and moved to ready later */
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return (err);
}
1181
1182 int
1183 mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
1184 {
1185         void *in;
1186         void *sqc;
1187         int inlen;
1188         int err;
1189
1190         inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1191         in = mlx5_vzalloc(inlen);
1192         if (in == NULL)
1193                 return (-ENOMEM);
1194
1195         sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1196
1197         MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
1198         MLX5_SET(modify_sq_in, in, sq_state, curr_state);
1199         MLX5_SET(sqc, sqc, state, next_state);
1200
1201         err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);
1202
1203         kvfree(in);
1204
1205         return (err);
1206 }
1207
1208 void
1209 mlx5e_disable_sq(struct mlx5e_sq *sq)
1210 {
1211
1212         mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
1213 }
1214
/*
 * Bring up a send queue: allocate software state, create the firmware
 * object bound to the per-TC TIS, and move it from reset to ready.
 * On any failure the steps already taken are unwound in reverse
 * order.  Returns 0 or a negative errno.
 */
static int
mlx5e_open_sq(struct mlx5e_channel *c,
    int tc,
    struct mlx5e_sq_param *param,
    struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return (err);

	err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	/* publish readiness with release semantics for the TX path */
	atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_READY);

	return (0);

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return (err);
}
1246
/*
 * Post NOP WQEs until the completion-event counter reaches zero, so
 * that a completion event is eventually generated for any pending
 * work.  Must be called with sq->lock held.  When "can_sleep" is set
 * the lock is dropped while waiting for ring space; otherwise the
 * function gives up when the ring is full.
 */
static void
mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
{
	/* fill up remainder with NOPs */
	while (sq->cev_counter != 0) {
		while (!mlx5e_sq_has_room_for(sq, 1)) {
			if (can_sleep != 0) {
				/* drop the lock so the TX path can drain */
				mtx_unlock(&sq->lock);
				msleep(4);
				mtx_lock(&sq->lock);
			} else {
				goto done;
			}
		}
		/* send a single NOP */
		mlx5e_send_nop(sq, 1);
		atomic_thread_fence_rel();
	}
done:
	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
	}
}
1272
/*
 * Periodic callout driving the TX completion-event state machine.
 * On the first tick it arms the "send NOPs" state; on the next tick
 * it pads the ring with NOPs so outstanding completions fire.  The
 * callout rearms itself until the NOP counter drains to zero.
 * Runs with sq->lock held (callout was initialized with that mutex).
 */
void
mlx5e_sq_cev_timeout(void *arg)
{
	struct mlx5e_sq *sq = arg;

	mtx_assert(&sq->lock, MA_OWNED);

	/* check next state */
	switch (sq->cev_next_state) {
	case MLX5E_CEV_STATE_SEND_NOPS:
		/* fill TX ring with NOPs, if any */
		mlx5e_sq_send_nops_locked(sq, 0);

		/* check if completed */
		if (sq->cev_counter == 0) {
			sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
			return;
		}
		break;
	default:
		/* send NOPs on next timeout */
		sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
		break;
	}

	/* restart timer */
	callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
}
1301
/*
 * Stop an SQ and wait for the hardware to complete everything posted
 * on it.  The queue is first marked stopped and padded with NOPs so
 * that all pending completions fire; then we wait for cc == pc while
 * the link is up, move the SQ to error state to flush the remainder,
 * and wait again until the ring is fully consumed (or the device is
 * in internal error).  Caller must hold the priv configuration lock.
 */
void
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
	int error;
	struct mlx5_core_dev *mdev= sq->priv->mdev;

	/*
	 * Check if already stopped.
	 *
	 * NOTE: The "stopped" variable is only written when both the
	 * priv's configuration lock and the SQ's lock is locked. It
	 * can therefore safely be read when only one of the two locks
	 * is locked. This function is always called when the priv's
	 * configuration lock is locked.
	 */
	if (sq->stopped != 0)
		return;

	mtx_lock(&sq->lock);

	/* don't put more packets into the SQ */
	sq->stopped = 1;

	/* teardown event factor timer, if any */
	sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
	callout_stop(&sq->cev_callout);

	/* send dummy NOPs in order to flush the transmit ring */
	mlx5e_sq_send_nops_locked(sq, 1);
	mtx_unlock(&sq->lock);

	/* make sure it is safe to free the callout */
	callout_drain(&sq->cev_callout);

	/* wait till SQ is empty or link is down */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	    (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
	    mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* drop the lock while sleeping and reaping completions */
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);

	/* error out remaining requests */
	error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	if (error != 0) {
		if_printf(sq->ifp,
		    "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
	}

	/* wait till SQ is empty */
	mtx_lock(&sq->lock);
	while (sq->cc != sq->pc &&
	       mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mtx_unlock(&sq->lock);
		msleep(1);
		sq->cq.mcq.comp(&sq->cq.mcq);
		mtx_lock(&sq->lock);
	}
	mtx_unlock(&sq->lock);
}
1366
/*
 * Fully tear down a send queue: drain all posted work, destroy the
 * firmware object, then free the software state.
 */
static void
mlx5e_close_sq_wait(struct mlx5e_sq *sq)
{

	mlx5e_drain_sq(sq);
	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}
1375
/*
 * Allocate the software state of a completion queue: the CQ work
 * queue buffer, doorbell records and the core-CQ callbacks.  The
 * firmware object is created separately by mlx5e_enable_cq().
 * "comp" is the completion handler and "eq_ix" selects the event
 * queue/interrupt vector.  Returns 0 or a negative errno.
 */
static int
mlx5e_create_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
	    &cq->wq_ctrl);
	if (err)
		return (err);

	/* only the IRQ number is needed here; the EQN is set at enable time */
	mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	/* the two doorbell records live back to back in the DB page */
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = eq_ix;
	mcq->comp = comp;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		/* pre-set op_own so no CQE looks valid before HW writes it
		 * -- NOTE(review): 0xf1 presumably encodes an invalid
		 * opcode plus the HW ownership bit; confirm against PRM */
		cqe->op_own = 0xf1;
	}

	cq->priv = priv;

	return (0);
}
1421
/* Free the CQ work-queue buffer allocated by mlx5e_create_cq(). */
static void
mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}
1427
/*
 * Create the hardware CQ object through a firmware CREATE_CQ command
 * and arm it for the first completion event.  "eq_ix" selects the
 * event queue the CQ reports to.  Returns 0 or a negative errno.
 */
static int
mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	void *in;
	void *cqc;
	int inlen;
	int irqn_not_used;
	int eqn;
	int err;

	/* command length includes the variable-size page array (PAS) */
	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
	    sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
	    (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));

	/* resolve the EQ number for this vector */
	mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
	    PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return (err);

	/* request an event for the next completion */
	mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));

	return (0);
}
1471
/* Destroy the firmware CQ object created by mlx5e_enable_cq(). */
static void
mlx5e_disable_cq(struct mlx5e_cq *cq)
{

	mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
}
1478
/*
 * Bring up a completion queue: allocate software state, then create
 * the firmware object.  On failure the software state is released.
 * Returns 0 or a negative errno.
 */
int
mlx5e_open_cq(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param,
    struct mlx5e_cq *cq,
    mlx5e_cq_comp_t *comp,
    int eq_ix)
{
	int err;

	err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
	if (err)
		return (err);

	err = mlx5e_enable_cq(cq, param, eq_ix);
	if (err)
		goto err_destroy_cq;

	return (0);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return (err);
}
1503
/* Tear down a CQ: destroy the firmware object, then free SW state. */
void
mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}
1510
1511 static int
1512 mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1513     struct mlx5e_channel_param *cparam)
1514 {
1515         int err;
1516         int tc;
1517
1518         for (tc = 0; tc < c->num_tc; tc++) {
1519                 /* open completion queue */
1520                 err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
1521                     &mlx5e_tx_cq_comp, c->ix);
1522                 if (err)
1523                         goto err_close_tx_cqs;
1524         }
1525         return (0);
1526
1527 err_close_tx_cqs:
1528         for (tc--; tc >= 0; tc--)
1529                 mlx5e_close_cq(&c->sq[tc].cq);
1530
1531         return (err);
1532 }
1533
1534 static void
1535 mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1536 {
1537         int tc;
1538
1539         for (tc = 0; tc < c->num_tc; tc++)
1540                 mlx5e_close_cq(&c->sq[tc].cq);
1541 }
1542
1543 static int
1544 mlx5e_open_sqs(struct mlx5e_channel *c,
1545     struct mlx5e_channel_param *cparam)
1546 {
1547         int err;
1548         int tc;
1549
1550         for (tc = 0; tc < c->num_tc; tc++) {
1551                 err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
1552                 if (err)
1553                         goto err_close_sqs;
1554         }
1555
1556         return (0);
1557
1558 err_close_sqs:
1559         for (tc--; tc >= 0; tc--)
1560                 mlx5e_close_sq_wait(&c->sq[tc]);
1561
1562         return (err);
1563 }
1564
1565 static void
1566 mlx5e_close_sqs_wait(struct mlx5e_channel *c)
1567 {
1568         int tc;
1569
1570         for (tc = 0; tc < c->num_tc; tc++)
1571                 mlx5e_close_sq_wait(&c->sq[tc]);
1572 }
1573
1574 static void
1575 mlx5e_chan_mtx_init(struct mlx5e_channel *c)
1576 {
1577         int tc;
1578
1579         mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
1580
1581         callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
1582
1583         for (tc = 0; tc < c->num_tc; tc++) {
1584                 struct mlx5e_sq *sq = c->sq + tc;
1585
1586                 mtx_init(&sq->lock, "mlx5tx",
1587                     MTX_NETWORK_LOCK " TX", MTX_DEF);
1588                 mtx_init(&sq->comp_lock, "mlx5comp",
1589                     MTX_NETWORK_LOCK " TX", MTX_DEF);
1590
1591                 callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
1592
1593                 sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
1594
1595                 /* ensure the TX completion event factor is not zero */
1596                 if (sq->cev_factor == 0)
1597                         sq->cev_factor = 1;
1598         }
1599 }
1600
1601 static void
1602 mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
1603 {
1604         int tc;
1605
1606         mtx_destroy(&c->rq.mtx);
1607
1608         for (tc = 0; tc < c->num_tc; tc++) {
1609                 mtx_destroy(&c->sq[tc].lock);
1610                 mtx_destroy(&c->sq[tc].comp_lock);
1611         }
1612 }
1613
/*
 * Allocate and bring up channel "ix": its TX CQs, RX CQ, per-TC SQs
 * and the RQ, in that order.  On success the channel pointer is
 * stored through "cp" and the RX CQ is polled once to process any
 * early completions.  On failure everything is unwound in reverse
 * order.  Returns 0 or a negative errno.
 */
static int
mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
    struct mlx5e_channel_param *cparam,
    struct mlx5e_channel *volatile *cp)
{
	struct mlx5e_channel *c;
	int err;

	c = malloc(sizeof(*c), M_MLX5EN, M_WAITOK | M_ZERO);
	c->priv = priv;
	c->ix = ix;
	c->cpu = 0;
	c->ifp = priv->ifp;
	/* big-endian memory key used by RX/TX WQEs of this channel */
	c->mkey_be = cpu_to_be32(priv->mr.key);
	c->num_tc = priv->num_tc;

	/* init mutexes */
	mlx5e_chan_mtx_init(c);

	/* open transmit completion queue */
	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_free;

	/* open receive completion queue */
	err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
	    &mlx5e_rx_cq_comp, c->ix);
	if (err)
		goto err_close_tx_cqs;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_rx_cq;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	/* store channel pointer */
	*cp = c;

	/* poll receive queue initially */
	c->rq.cq.mcq.comp(&c->rq.cq.mcq);

	return (0);

err_close_sqs:
	mlx5e_close_sqs_wait(c);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_free:
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
	free(c, M_MLX5EN);
	return (err);
}
1675
/*
 * First phase of closing a channel: only initiate the RQ shutdown so
 * that, when closing many channels, all RQs start flushing before any
 * of them is waited on (see mlx5e_close_channel_wait()).
 */
static void
mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
{
	struct mlx5e_channel *c = *pp;

	/* check if channel is already closed */
	if (c == NULL)
		return;
	mlx5e_close_rq(&c->rq);
}
1686
/*
 * Second phase of closing a channel: wait for the RQ to drain, tear
 * down the SQs and all CQs, then free the channel.  The caller's
 * channel pointer is cleared before teardown so no other code path
 * can pick it up.
 */
static void
mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp)
{
	struct mlx5e_channel *c = *pp;

	/* check if channel is already closed */
	if (c == NULL)
		return;
	/* ensure channel pointer is no longer used */
	*pp = NULL;

	mlx5e_close_rq_wait(&c->rq);
	mlx5e_close_sqs_wait(c);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	/* destroy mutexes */
	mlx5e_chan_mtx_destroy(c);
	free(c, M_MLX5EN);
}
1706
/*
 * Fill in the RQ creation parameters shared by all channels:
 * linked-list WQ type, aligned end padding, WQE stride/size from the
 * configured log RQ size, and the protection domain.
 */
static void
mlx5e_build_rq_param(struct mlx5e_priv *priv,
    struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;
	param->wq.linear = 1;
}
1724
/*
 * Fill in the SQ creation parameters shared by all channels: WQE
 * stride (one send WQE basic block), the configured log SQ size and
 * the protection domain.
 */
static void
mlx5e_build_sq_param(struct mlx5e_priv *priv,
    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = 0;
	param->wq.db_numa_node = 0;
	param->wq.linear = 1;
}
1740
/* Set the CQ parameters common to RX and TX CQs (the UAR page). */
static void
mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}
1749
/*
 * Fill in the RX CQ creation parameters: optional CQE compression
 * (zipping), CQ size matching the RQ, and the configured interrupt
 * moderation (period/count and period mode).
 */
static void
mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;


	/*
	 * TODO The sysctl to control on/off is a bool value for now, which means
	 * we only support CSUM, once HASH is implemented we'll need to address that.
	 */
	if (priv->params.cqe_zipping_en) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_compression_en, 1);
	}

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
	MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
	MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);

	switch (priv->params.rx_cq_moderation_mode) {
	case 0:
		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	default:
		/* CQE-based moderation needs device capability support */
		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
		else
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	}

	mlx5e_build_common_cq_param(priv, param);
}
1784
/*
 * Fill in the TX CQ creation parameters: CQ size matching the SQ and
 * the configured interrupt moderation (period/count and period mode).
 */
static void
mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
	MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
	MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);

	switch (priv->params.tx_cq_moderation_mode) {
	case 0:
		MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	default:
		/* CQE-based moderation needs device capability support */
		if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
		else
			MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
		break;
	}

	mlx5e_build_common_cq_param(priv, param);
}
1809
/*
 * Build the complete set of creation parameters for one channel:
 * RQ, SQ and both CQ parameter blocks, starting from a zeroed struct.
 */
static void
mlx5e_build_channel_param(struct mlx5e_priv *priv,
    struct mlx5e_channel_param *cparam)
{
	memset(cparam, 0, sizeof(*cparam));

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}
1821
/*
 * Open all configured channels, then wait until every RQ has its
 * minimum number of receive WQEs posted.  On failure all channels
 * opened so far are closed again and the channel array is freed.
 * Returns 0 or a negative errno.
 */
static int
mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param cparam;
	void *ptr;
	int err;
	int i;
	int j;

	priv->channel = malloc(priv->params.num_channels *
	    sizeof(struct mlx5e_channel *), M_MLX5EN, M_WAITOK | M_ZERO);

	mlx5e_build_channel_param(priv, &cparam);
	for (i = 0; i < priv->params.num_channels; i++) {
		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < priv->params.num_channels; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	return (0);

err_close_channels:
	/*
	 * Close channels 0..i-1; when the failure came from the wait
	 * loop above, i equals num_channels so all channels are closed.
	 */
	for (i--; i >= 0; i--) {
		mlx5e_close_channel(&priv->channel[i]);
		mlx5e_close_channel_wait(&priv->channel[i]);
	}

	/* remove "volatile" attribute from "channel" pointer */
	ptr = __DECONST(void *, priv->channel);
	priv->channel = NULL;

	free(ptr, M_MLX5EN);

	return (err);
}
1863
1864 static void
1865 mlx5e_close_channels(struct mlx5e_priv *priv)
1866 {
1867         void *ptr;
1868         int i;
1869
1870         if (priv->channel == NULL)
1871                 return;
1872
1873         for (i = 0; i < priv->params.num_channels; i++)
1874                 mlx5e_close_channel(&priv->channel[i]);
1875         for (i = 0; i < priv->params.num_channels; i++)
1876                 mlx5e_close_channel_wait(&priv->channel[i]);
1877
1878         /* remove "volatile" attribute from "channel" pointer */
1879         ptr = __DECONST(void *, priv->channel);
1880         priv->channel = NULL;
1881
1882         free(ptr, M_MLX5EN);
1883 }
1884
1885 static int
1886 mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
1887 {
1888
1889         if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1890                 uint8_t cq_mode;
1891
1892                 switch (priv->params.tx_cq_moderation_mode) {
1893                 case 0:
1894                         cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1895                         break;
1896                 default:
1897                         cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1898                         break;
1899                 }
1900
1901                 return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
1902                     priv->params.tx_cq_moderation_usec,
1903                     priv->params.tx_cq_moderation_pkts,
1904                     cq_mode));
1905         }
1906
1907         return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
1908             priv->params.tx_cq_moderation_usec,
1909             priv->params.tx_cq_moderation_pkts));
1910 }
1911
1912 static int
1913 mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
1914 {
1915
1916         if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
1917                 uint8_t cq_mode;
1918                 int retval;
1919
1920                 switch (priv->params.rx_cq_moderation_mode) {
1921                 case 0:
1922                         cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1923                         break;
1924                 default:
1925                         cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1926                         break;
1927                 }
1928
1929                 retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
1930                     priv->params.rx_cq_moderation_usec,
1931                     priv->params.rx_cq_moderation_pkts,
1932                     cq_mode);
1933
1934                 return (retval);
1935         }
1936
1937         return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
1938             priv->params.rx_cq_moderation_usec,
1939             priv->params.rx_cq_moderation_pkts));
1940 }
1941
1942 static int
1943 mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
1944 {
1945         int err;
1946         int i;
1947
1948         if (c == NULL)
1949                 return (EINVAL);
1950
1951         err = mlx5e_refresh_rq_params(priv, &c->rq);
1952         if (err)
1953                 goto done;
1954
1955         for (i = 0; i != c->num_tc; i++) {
1956                 err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
1957                 if (err)
1958                         goto done;
1959         }
1960 done:
1961         return (err);
1962 }
1963
1964 int
1965 mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
1966 {
1967         int i;
1968
1969         if (priv->channel == NULL)
1970                 return (EINVAL);
1971
1972         for (i = 0; i < priv->params.num_channels; i++) {
1973                 int err;
1974
1975                 err = mlx5e_refresh_channel_params_sub(priv, priv->channel[i]);
1976                 if (err)
1977                         return (err);
1978         }
1979         return (0);
1980 }
1981
1982 static int
1983 mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
1984 {
1985         struct mlx5_core_dev *mdev = priv->mdev;
1986         u32 in[MLX5_ST_SZ_DW(create_tis_in)];
1987         void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1988
1989         memset(in, 0, sizeof(in));
1990
1991         MLX5_SET(tisc, tisc, prio, tc);
1992         MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
1993
1994         return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
1995 }
1996
/* Destroy the TIS object previously created for traffic class "tc". */
static void
mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
2002
2003 static int
2004 mlx5e_open_tises(struct mlx5e_priv *priv)
2005 {
2006         int num_tc = priv->num_tc;
2007         int err;
2008         int tc;
2009
2010         for (tc = 0; tc < num_tc; tc++) {
2011                 err = mlx5e_open_tis(priv, tc);
2012                 if (err)
2013                         goto err_close_tises;
2014         }
2015
2016         return (0);
2017
2018 err_close_tises:
2019         for (tc--; tc >= 0; tc--)
2020                 mlx5e_close_tis(priv, tc);
2021
2022         return (err);
2023 }
2024
2025 static void
2026 mlx5e_close_tises(struct mlx5e_priv *priv)
2027 {
2028         int num_tc = priv->num_tc;
2029         int tc;
2030
2031         for (tc = 0; tc < num_tc; tc++)
2032                 mlx5e_close_tis(priv, tc);
2033 }
2034
/*
 * Create the receive queue table (RQT) used for RSS indirection and
 * store its number in priv->rqtn.
 *
 * The table holds 2^rx_hash_log_tbl_sz entries; each entry maps to
 * the RQ number of one open channel.  With the kernel RSS option the
 * mapping follows the RSS indirection table, otherwise channels are
 * assigned round-robin.
 *
 * Returns zero on success, -ENOMEM, or a firmware command error.
 */
static int
mlx5e_open_rqt(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
	void *rqtc;
	int inlen;
	int err;
	int sz;
	int i;

	sz = 1 << priv->params.rx_hash_log_tbl_sz;

	/* The input mailbox carries one 32-bit RQ number per table entry. */
	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (in == NULL)
		return (-ENOMEM);
	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++) {
		int ix;
#ifdef RSS
		ix = rss_get_indirection_to_bucket(i);
#else
		ix = i;
#endif
		/* ensure we don't overflow */
		ix %= priv->params.num_channels;
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
	}

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (!err)
		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);

	kvfree(in);

	return (err);
}
2080
2081 static void
2082 mlx5e_close_rqt(struct mlx5e_priv *priv)
2083 {
2084         u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0};
2085         u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
2086
2087         MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
2088         MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
2089
2090         mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
2091 }
2092
/*
 * Fill in a transport interface receive (TIR) context for traffic
 * type "tt".
 *
 * MLX5E_TT_ANY gets a direct-dispatch TIR pointing at channel 0's RQ;
 * all other traffic types get an indirect TIR spreading traffic over
 * the RQT via a Toeplitz hash.  The second switch selects which
 * packet fields feed the hash for each traffic type.  LRO settings
 * are applied when hardware LRO is enabled.
 */
static void
mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
	__be32 *hkey;

	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

/* Conservative estimate of L2+L3 header overhead subtracted from the LRO message size. */
#define ROUGH_MAX_L2_L3_HDR_SZ 256

#define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                          MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                          MLX5_HASH_FIELD_SEL_DST_IP   |\
                          MLX5_HASH_FIELD_SEL_L4_SPORT |\
                          MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	if (priv->params.hw_lro_en) {
		MLX5_SET(tirc, tirc, lro_enable_mask,
		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
		MLX5_SET(tirc, tirc, lro_max_msg_sz,
		    (priv->params.lro_wqe_sz -
		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
		/* TODO: add the option to choose timer value dynamically */
		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
		    MLX5_CAP_ETH(priv->mdev,
		    lro_timer_supported_periods[2]));
	}

	/* setup parameters for hashing TIR type, if any */
	switch (tt) {
	case MLX5E_TT_ANY:
		/* No hashing: dispatch directly to channel 0's RQ. */
		MLX5_SET(tirc, tirc, disp_type,
		    MLX5_TIRC_DISP_TYPE_DIRECT);
		MLX5_SET(tirc, tirc, inline_rqn,
		    priv->channel[0]->rq.rqn);
		break;
	default:
		/* Indirect dispatch: Toeplitz-hash into the RQT. */
		MLX5_SET(tirc, tirc, disp_type,
		    MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table,
		    priv->rqtn);
		MLX5_SET(tirc, tirc, rx_hash_fn,
		    MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
		hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
#ifdef RSS
		/*
		 * The FreeBSD RSS implementation does currently not
		 * support symmetric Toeplitz hashes:
		 */
		MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
		rss_getkey((uint8_t *)hkey);
#else
		/* Fixed default Toeplitz hash key, symmetric hashing enabled. */
		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		hkey[0] = cpu_to_be32(0xD181C62C);
		hkey[1] = cpu_to_be32(0xF7F4DB5B);
		hkey[2] = cpu_to_be32(0x1983A2FC);
		hkey[3] = cpu_to_be32(0x943E1ADB);
		hkey[4] = cpu_to_be32(0xD9389E6B);
		hkey[5] = cpu_to_be32(0xD1039C2C);
		hkey[6] = cpu_to_be32(0xA74499AD);
		hkey[7] = cpu_to_be32(0x593D56D9);
		hkey[8] = cpu_to_be32(0xF3253C06);
		hkey[9] = cpu_to_be32(0x2ADC1FFC);
#endif
		break;
	}

	/* Select the packet fields that feed the hash for this traffic type. */
	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
		    MLX5_L4_PROT_TYPE_TCP);
#ifdef RSS
		/* Fall back to IP-only hashing if the kernel RSS config
		 * does not enable 4-tuple hashing for this type. */
		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			    MLX5_HASH_IP);
		} else
#endif
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_ALL);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
		    MLX5_L4_PROT_TYPE_TCP);
#ifdef RSS
		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			    MLX5_HASH_IP);
		} else
#endif
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_ALL);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
		    MLX5_L4_PROT_TYPE_UDP);
#ifdef RSS
		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			    MLX5_HASH_IP);
		} else
#endif
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_ALL);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
		    MLX5_L4_PROT_TYPE_UDP);
#ifdef RSS
		if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
			MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			    MLX5_HASH_IP);
		} else
#endif
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_ALL);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		/* IPsec types hash on addresses plus the SPI. */
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		/* Plain IP traffic hashes on addresses only. */
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
		    MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
		    MLX5_HASH_IP);
		break;

	default:
		break;
	}
}
2274
2275 static int
2276 mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2277 {
2278         struct mlx5_core_dev *mdev = priv->mdev;
2279         u32 *in;
2280         void *tirc;
2281         int inlen;
2282         int err;
2283
2284         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2285         in = mlx5_vzalloc(inlen);
2286         if (in == NULL)
2287                 return (-ENOMEM);
2288         tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2289
2290         mlx5e_build_tir_ctx(priv, tirc, tt);
2291
2292         err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2293
2294         kvfree(in);
2295
2296         return (err);
2297 }
2298
/* Destroy the TIR object previously created for traffic type "tt". */
static void
mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
{
	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}
2304
2305 static int
2306 mlx5e_open_tirs(struct mlx5e_priv *priv)
2307 {
2308         int err;
2309         int i;
2310
2311         for (i = 0; i < MLX5E_NUM_TT; i++) {
2312                 err = mlx5e_open_tir(priv, i);
2313                 if (err)
2314                         goto err_close_tirs;
2315         }
2316
2317         return (0);
2318
2319 err_close_tirs:
2320         for (i--; i >= 0; i--)
2321                 mlx5e_close_tir(priv, i);
2322
2323         return (err);
2324 }
2325
2326 static void
2327 mlx5e_close_tirs(struct mlx5e_priv *priv)
2328 {
2329         int i;
2330
2331         for (i = 0; i < MLX5E_NUM_TT; i++)
2332                 mlx5e_close_tir(priv, i);
2333 }
2334
2335 /*
2336  * SW MTU does not include headers,
2337  * HW MTU includes all headers and checksums.
2338  */
2339 static int
2340 mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2341 {
2342         struct mlx5e_priv *priv = ifp->if_softc;
2343         struct mlx5_core_dev *mdev = priv->mdev;
2344         int hw_mtu;
2345         int err;
2346
2347         hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
2348
2349         err = mlx5_set_port_mtu(mdev, hw_mtu);
2350         if (err) {
2351                 if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
2352                     __func__, sw_mtu, err);
2353                 return (err);
2354         }
2355
2356         /* Update vport context MTU */
2357         err = mlx5_set_vport_mtu(mdev, hw_mtu);
2358         if (err) {
2359                 if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n",
2360                     __func__, err);
2361         }
2362
2363         ifp->if_mtu = sw_mtu;
2364
2365         err = mlx5_query_vport_mtu(mdev, &hw_mtu);
2366         if (err || !hw_mtu) {
2367                 /* fallback to port oper mtu */
2368                 err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2369         }
2370         if (err) {
2371                 if_printf(ifp, "Query port MTU, after setting new "
2372                     "MTU value, failed\n");
2373                 return (err);
2374         } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
2375                 err = -E2BIG,
2376                 if_printf(ifp, "Port MTU %d is smaller than "
2377                     "ifp mtu %d\n", hw_mtu, sw_mtu);
2378         } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
2379                 err = -EINVAL;
2380                 if_printf(ifp, "Port MTU %d is bigger than "
2381                     "ifp mtu %d\n", hw_mtu, sw_mtu);
2382         }
2383         priv->params_ethtool.hw_mtu = hw_mtu;
2384
2385         return (err);
2386 }
2387
/*
 * Bring the interface up: create TISes, allocate the vport queue
 * counter, open all channels, create the RQT and TIRs, open the flow
 * table and install the VLAN rules.  Each step's failure unwinds all
 * previous steps via the goto labels below, in exact reverse order.
 *
 * Caller must hold the private lock.  Returns zero on success (or if
 * already opened) or the first error encountered.
 */
int
mlx5e_open_locked(struct ifnet *ifp)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	int err;
	u16 set_id;

	/* check if already opened */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
		return (0);

#ifdef RSS
	if (rss_getnumbuckets() > priv->params.num_channels) {
		if_printf(ifp, "NOTE: There are more RSS buckets(%u) than "
		    "channels(%u) available\n", rss_getnumbuckets(),
		    priv->params.num_channels);
	}
#endif
	err = mlx5e_open_tises(priv);
	if (err) {
		if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
		    __func__, err);
		return (err);
	}
	err = mlx5_vport_alloc_q_counter(priv->mdev,
	    MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
	if (err) {
		if_printf(priv->ifp,
		    "%s: mlx5_vport_alloc_q_counter failed: %d\n",
		    __func__, err);
		goto err_close_tises;
	}
	/* store counter set ID */
	priv->counter_set_id = set_id;

	err = mlx5e_open_channels(priv);
	if (err) {
		if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
		    __func__, err);
		goto err_dalloc_q_counter;
	}
	err = mlx5e_open_rqt(priv);
	if (err) {
		if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
		    __func__, err);
		goto err_close_channels;
	}
	err = mlx5e_open_tirs(priv);
	if (err) {
		if_printf(ifp, "%s: mlx5e_open_tir failed, %d\n",
		    __func__, err);
		goto err_close_rqls;
	}
	err = mlx5e_open_flow_table(priv);
	if (err) {
		if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
		    __func__, err);
		goto err_close_tirs;
	}
	err = mlx5e_add_all_vlan_rules(priv);
	if (err) {
		if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
		    __func__, err);
		goto err_close_flow_table;
	}
	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_update_carrier(priv);
	mlx5e_set_rx_mode_core(priv);

	return (0);

	/* Error unwind: reverse order of the setup steps above. */
err_close_flow_table:
	mlx5e_close_flow_table(priv);

err_close_tirs:
	mlx5e_close_tirs(priv);

err_close_rqls:
	mlx5e_close_rqt(priv);

err_close_channels:
	mlx5e_close_channels(priv);

err_dalloc_q_counter:
	mlx5_vport_dealloc_q_counter(priv->mdev,
	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);

err_close_tises:
	mlx5e_close_tises(priv);

	return (err);
}
2481
2482 static void
2483 mlx5e_open(void *arg)
2484 {
2485         struct mlx5e_priv *priv = arg;
2486
2487         PRIV_LOCK(priv);
2488         if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
2489                 if_printf(priv->ifp,
2490                     "%s: Setting port status to up failed\n",
2491                     __func__);
2492
2493         mlx5e_open_locked(priv->ifp);
2494         priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2495         PRIV_UNLOCK(priv);
2496 }
2497
/*
 * Bring the interface down, tearing down everything created by
 * mlx5e_open_locked() in exact reverse order.  Caller must hold the
 * private lock.  Always returns zero.
 */
int
mlx5e_close_locked(struct ifnet *ifp)
{
	struct mlx5e_priv *priv = ifp->if_softc;

	/* check if already closed */
	if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
		return (0);

	/* Clear OPENED first so concurrent paths see the device closing. */
	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_set_rx_mode_core(priv);
	mlx5e_del_all_vlan_rules(priv);
	if_link_state_change(priv->ifp, LINK_STATE_DOWN);
	mlx5e_close_flow_table(priv);
	mlx5e_close_tirs(priv);
	mlx5e_close_rqt(priv);
	mlx5e_close_channels(priv);
	mlx5_vport_dealloc_q_counter(priv->mdev,
	    MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
	mlx5e_close_tises(priv);

	return (0);
}
2522
#if (__FreeBSD_version >= 1100000)
/*
 * ifnet if_get_counter callback: map generic interface counters onto
 * the driver's vport and physical-port statistics.  Runs lockless;
 * the statistics fields are read without the private lock (see the
 * XXX notes below).
 */
static uint64_t
mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct mlx5e_priv *priv = ifp->if_softc;
	u64 retval;

	/* PRIV_LOCK(priv); XXX not allowed */
	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		retval = priv->stats.vport.rx_packets;
		break;
	case IFCOUNTER_IERRORS:
		/* Sum of all RX error counters exposed by the port. */
		retval = priv->stats.vport.rx_error_packets +
		    priv->stats.pport.alignment_err +
		    priv->stats.pport.check_seq_err +
		    priv->stats.pport.crc_align_errors +
		    priv->stats.pport.in_range_len_errors +
		    priv->stats.pport.jabbers +
		    priv->stats.pport.out_of_range_len +
		    priv->stats.pport.oversize_pkts +
		    priv->stats.pport.symbol_err +
		    priv->stats.pport.too_long_errors +
		    priv->stats.pport.undersize_pkts +
		    priv->stats.pport.unsupported_op_rx;
		break;
	case IFCOUNTER_IQDROPS:
		retval = priv->stats.vport.rx_out_of_buffer +
		    priv->stats.pport.drop_events;
		break;
	case IFCOUNTER_OPACKETS:
		retval = priv->stats.vport.tx_packets;
		break;
	case IFCOUNTER_OERRORS:
		retval = priv->stats.vport.tx_error_packets;
		break;
	case IFCOUNTER_IBYTES:
		retval = priv->stats.vport.rx_bytes;
		break;
	case IFCOUNTER_OBYTES:
		retval = priv->stats.vport.tx_bytes;
		break;
	case IFCOUNTER_IMCASTS:
		retval = priv->stats.vport.rx_multicast_packets;
		break;
	case IFCOUNTER_OMCASTS:
		retval = priv->stats.vport.tx_multicast_packets;
		break;
	case IFCOUNTER_OQDROPS:
		retval = priv->stats.vport.tx_queue_dropped;
		break;
	case IFCOUNTER_COLLISIONS:
		retval = priv->stats.pport.collisions;
		break;
	default:
		/* Counters the driver does not track. */
		retval = if_get_counter_default(ifp, cnt);
		break;
	}
	/* PRIV_UNLOCK(priv); XXX not allowed */
	return (retval);
}
#endif
2585
/*
 * Schedule RX filter/mode reprogramming on the driver workqueue; the
 * actual work is done asynchronously by the set_rx_mode_work handler.
 */
static void
mlx5e_set_rx_mode(struct ifnet *ifp)
{
	struct mlx5e_priv *priv = ifp->if_softc;

	queue_work(priv->wq, &priv->set_rx_mode_work);
}
2593
2594 static int
2595 mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2596 {
2597         struct mlx5e_priv *priv;
2598         struct ifreq *ifr;
2599         struct ifi2creq i2c;
2600         int error = 0;
2601         int mask = 0;
2602         int size_read = 0;
2603         int module_status;
2604         int module_num;
2605         int max_mtu;
2606         uint8_t read_addr;
2607
2608         priv = ifp->if_softc;
2609
2610         /* check if detaching */
2611         if (priv == NULL || priv->gone != 0)
2612                 return (ENXIO);
2613
2614         switch (command) {
2615         case SIOCSIFMTU:
2616                 ifr = (struct ifreq *)data;
2617
2618                 PRIV_LOCK(priv);
2619                 mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
2620
2621                 if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
2622                     ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
2623                         int was_opened;
2624
2625                         was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2626                         if (was_opened)
2627                                 mlx5e_close_locked(ifp);
2628
2629                         /* set new MTU */
2630                         mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
2631
2632                         if (was_opened)
2633                                 mlx5e_open_locked(ifp);
2634                 } else {
2635                         error = EINVAL;
2636                         if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
2637                             MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
2638                 }
2639                 PRIV_UNLOCK(priv);
2640                 break;
2641         case SIOCSIFFLAGS:
2642                 if ((ifp->if_flags & IFF_UP) &&
2643                     (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2644                         mlx5e_set_rx_mode(ifp);
2645                         break;
2646                 }
2647                 PRIV_LOCK(priv);
2648                 if (ifp->if_flags & IFF_UP) {
2649                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2650                                 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2651                                         mlx5e_open_locked(ifp);
2652                                 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2653                                 mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
2654                         }
2655                 } else {
2656                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2657                                 mlx5_set_port_status(priv->mdev,
2658                                     MLX5_PORT_DOWN);
2659                                 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2660                                         mlx5e_close_locked(ifp);
2661                                 mlx5e_update_carrier(priv);
2662                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2663                         }
2664                 }
2665                 PRIV_UNLOCK(priv);
2666                 break;
2667         case SIOCADDMULTI:
2668         case SIOCDELMULTI:
2669                 mlx5e_set_rx_mode(ifp);
2670                 break;
2671         case SIOCSIFMEDIA:
2672         case SIOCGIFMEDIA:
2673         case SIOCGIFXMEDIA:
2674                 ifr = (struct ifreq *)data;
2675                 error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
2676                 break;
2677         case SIOCSIFCAP:
2678                 ifr = (struct ifreq *)data;
2679                 PRIV_LOCK(priv);
2680                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2681
2682                 if (mask & IFCAP_TXCSUM) {
2683                         ifp->if_capenable ^= IFCAP_TXCSUM;
2684                         ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2685
2686                         if (IFCAP_TSO4 & ifp->if_capenable &&
2687                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
2688                                 ifp->if_capenable &= ~IFCAP_TSO4;
2689                                 ifp->if_hwassist &= ~CSUM_IP_TSO;
2690                                 if_printf(ifp,
2691                                     "tso4 disabled due to -txcsum.\n");
2692                         }
2693                 }
2694                 if (mask & IFCAP_TXCSUM_IPV6) {
2695                         ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2696                         ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2697
2698                         if (IFCAP_TSO6 & ifp->if_capenable &&
2699                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2700                                 ifp->if_capenable &= ~IFCAP_TSO6;
2701                                 ifp->if_hwassist &= ~CSUM_IP6_TSO;
2702                                 if_printf(ifp,
2703                                     "tso6 disabled due to -txcsum6.\n");
2704                         }
2705                 }
2706                 if (mask & IFCAP_RXCSUM)
2707                         ifp->if_capenable ^= IFCAP_RXCSUM;
2708                 if (mask & IFCAP_RXCSUM_IPV6)
2709                         ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2710                 if (mask & IFCAP_TSO4) {
2711                         if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2712                             !(IFCAP_TXCSUM & ifp->if_capenable)) {
2713                                 if_printf(ifp, "enable txcsum first.\n");
2714                                 error = EAGAIN;
2715                                 goto out;
2716                         }
2717                         ifp->if_capenable ^= IFCAP_TSO4;
2718                         ifp->if_hwassist ^= CSUM_IP_TSO;
2719                 }
2720                 if (mask & IFCAP_TSO6) {
2721                         if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2722                             !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2723                                 if_printf(ifp, "enable txcsum6 first.\n");
2724                                 error = EAGAIN;
2725                                 goto out;
2726                         }
2727                         ifp->if_capenable ^= IFCAP_TSO6;
2728                         ifp->if_hwassist ^= CSUM_IP6_TSO;
2729                 }
2730                 if (mask & IFCAP_VLAN_HWFILTER) {
2731                         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2732                                 mlx5e_disable_vlan_filter(priv);
2733                         else
2734                                 mlx5e_enable_vlan_filter(priv);
2735
2736                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2737                 }
2738                 if (mask & IFCAP_VLAN_HWTAGGING)
2739                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2740                 if (mask & IFCAP_WOL_MAGIC)
2741                         ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2742
2743                 VLAN_CAPABILITIES(ifp);
2744                 /* turn off LRO means also turn of HW LRO - if it's on */
2745                 if (mask & IFCAP_LRO) {
2746                         int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2747                         bool need_restart = false;
2748
2749                         ifp->if_capenable ^= IFCAP_LRO;
2750                         if (!(ifp->if_capenable & IFCAP_LRO)) {
2751                                 if (priv->params.hw_lro_en) {
2752                                         priv->params.hw_lro_en = false;
2753                                         need_restart = true;
2754                                         /* Not sure this is the correct way */
2755                                         priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
2756                                 }
2757                         }
2758                         if (was_opened && need_restart) {
2759                                 mlx5e_close_locked(ifp);
2760                                 mlx5e_open_locked(ifp);
2761                         }
2762                 }
2763 out:
2764                 PRIV_UNLOCK(priv);
2765                 break;
2766
2767         case SIOCGI2C:
2768                 ifr = (struct ifreq *)data;
2769
2770                 /*
2771                  * Copy from the user-space address ifr_data to the
2772                  * kernel-space address i2c
2773                  */
2774                 error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
2775                 if (error)
2776                         break;
2777
2778                 if (i2c.len > sizeof(i2c.data)) {
2779                         error = EINVAL;
2780                         break;
2781                 }
2782
2783                 PRIV_LOCK(priv);
2784                 /* Get module_num which is required for the query_eeprom */
2785                 error = mlx5_query_module_num(priv->mdev, &module_num);
2786                 if (error) {
2787                         if_printf(ifp, "Query module num failed, eeprom "
2788                             "reading is not supported\n");
2789                         error = EINVAL;
2790                         goto err_i2c;
2791                 }
2792                 /* Check if module is present before doing an access */
2793                 module_status = mlx5_query_module_status(priv->mdev, module_num);
2794                 if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED &&
2795                     module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) {
2796                         error = EINVAL;
2797                         goto err_i2c;
2798                 }
2799                 /*
2800                  * Currently 0XA0 and 0xA2 are the only addresses permitted.
2801                  * The internal conversion is as follows:
2802                  */
2803                 if (i2c.dev_addr == 0xA0)
2804                         read_addr = MLX5E_I2C_ADDR_LOW;
2805                 else if (i2c.dev_addr == 0xA2)
2806                         read_addr = MLX5E_I2C_ADDR_HIGH;
2807                 else {
2808                         if_printf(ifp, "Query eeprom failed, "
2809                             "Invalid Address: %X\n", i2c.dev_addr);
2810                         error = EINVAL;
2811                         goto err_i2c;
2812                 }
2813                 error = mlx5_query_eeprom(priv->mdev,
2814                     read_addr, MLX5E_EEPROM_LOW_PAGE,
2815                     (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
2816                     (uint32_t *)i2c.data, &size_read);
2817                 if (error) {
2818                         if_printf(ifp, "Query eeprom failed, eeprom "
2819                             "reading is not supported\n");
2820                         error = EINVAL;
2821                         goto err_i2c;
2822                 }
2823
2824                 if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
2825                         error = mlx5_query_eeprom(priv->mdev,
2826                             read_addr, MLX5E_EEPROM_LOW_PAGE,
2827                             (uint32_t)(i2c.offset + size_read),
2828                             (uint32_t)(i2c.len - size_read), module_num,
2829                             (uint32_t *)(i2c.data + size_read), &size_read);
2830                 }
2831                 if (error) {
2832                         if_printf(ifp, "Query eeprom failed, eeprom "
2833                             "reading is not supported\n");
2834                         error = EINVAL;
2835                         goto err_i2c;
2836                 }
2837
2838                 error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
2839 err_i2c:
2840                 PRIV_UNLOCK(priv);
2841                 break;
2842
2843         default:
2844                 error = ether_ioctl(ifp, command, data);
2845                 break;
2846         }
2847         return (error);
2848 }
2849
2850 static int
2851 mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
2852 {
2853         /*
2854          * TODO: uncoment once FW really sets all these bits if
2855          * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
2856          * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
2857          * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
2858          * -ENOTSUPP;
2859          */
2860
2861         /* TODO: add more must-to-have features */
2862
2863         if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2864                 return (-ENODEV);
2865
2866         return (0);
2867 }
2868
/*
 * Populate the per-interface private structure with its default
 * parameters: queue sizes, CQ moderation settings, RSS table size,
 * channel/TC counts, and the deferred-work handlers.  Called once
 * during interface creation, before any queues are built.
 */
static void
mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
    struct mlx5e_priv *priv,
    int num_comp_vectors)
{
	/*
	 * TODO: Consider link speed for setting "log_sq_size",
	 * "log_rq_size" and "cq_moderation_xxx":
	 */
	priv->params.log_sq_size =
	    MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.log_rq_size =
	    MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	/*
	 * When the HCA can start the CQ moderation period from the
	 * first completion (CQE) rather than from the event, use the
	 * FROM_CQE default and select moderation mode 1.
	 */
	priv->params.rx_cq_moderation_usec =
	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	priv->params.rx_cq_moderation_mode =
	    MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
	priv->params.rx_cq_moderation_pkts =
	    MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	priv->params.tx_cq_moderation_usec =
	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation_pkts =
	    MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.min_rx_wqes =
	    MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
	/* RSS table must cover at least one entry per completion vector */
	priv->params.rx_hash_log_tbl_sz =
	    (order_base_2(num_comp_vectors) >
	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
	    order_base_2(num_comp_vectors) :
	    MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
	priv->params.num_tc = 1;
	priv->params.default_vlan_prio = 0;
	/* -1 marks "no counter set allocated yet" */
	priv->counter_set_id = -1;

	/*
	 * hw lro is currently defaulted to off. when it won't anymore we
	 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
	 */
	priv->params.hw_lro_en = false;
	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression);

	priv->mdev = mdev;
	/* one channel per completion vector */
	priv->params.num_channels = num_comp_vectors;
	priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
	priv->queue_mapping_channel_mask =
	    roundup_pow_of_two(num_comp_vectors) - 1;
	priv->num_tc = priv->params.num_tc;
	priv->default_vlan_prio = priv->params.default_vlan_prio;

	/* deferred work handlers run on the driver workqueue */
	INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
}
2926
2927 static int
2928 mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
2929                   struct mlx5_core_mr *mkey)
2930 {
2931         struct ifnet *ifp = priv->ifp;
2932         struct mlx5_core_dev *mdev = priv->mdev;
2933         int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
2934         void *mkc;
2935         u32 *in;
2936         int err;
2937
2938         in = mlx5_vzalloc(inlen);
2939         if (in == NULL) {
2940                 if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
2941                 return (-ENOMEM);
2942         }
2943
2944         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
2945         MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA);
2946         MLX5_SET(mkc, mkc, lw, 1);
2947         MLX5_SET(mkc, mkc, lr, 1);
2948
2949         MLX5_SET(mkc, mkc, pd, pdn);
2950         MLX5_SET(mkc, mkc, length64, 1);
2951         MLX5_SET(mkc, mkc, qpn, 0xffffff);
2952
2953         err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
2954         if (err)
2955                 if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
2956                     __func__, err);
2957
2958         kvfree(in);
2959         return (err);
2960 }
2961
/* sysctl description strings for the vport (virtual port) statistics */
static const char *mlx5e_vport_stats_desc[] = {
	MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
};
2965
/* sysctl description strings for the pport (physical port) statistics */
static const char *mlx5e_pport_stats_desc[] = {
	MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
};
2969
/*
 * Initialize the per-interface synchronization primitives: the async
 * events mutex, the state sx lock, the watchdog callout (which runs
 * with the async events mutex held) and the doorbell lock.
 */
static void
mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
{
	mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
	sx_init(&priv->state_lock, "mlx5state");
	callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
	MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
}
2978
/*
 * Tear down the locks created by mlx5e_priv_mtx_init().  The watchdog
 * callout is expected to be stopped before this is reached.
 */
static void
mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
{
	mtx_destroy(&priv->async_events_mtx);
	sx_destroy(&priv->state_lock);
}
2985
/*
 * Sysctl handler reporting the HCA firmware revision as a read-only
 * "maj.min.sub" string.
 */
static int
sysctl_firmware(SYSCTL_HANDLER_ARGS)
{
	/*
	 * "%d.%d.%d" is the string format.
	 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536.
	 * We need at most 5 chars to store that.
	 * It also has: two "." and NULL at the end, which means we need 18
	 * (5*3 + 3) chars at most.
	 */
	char fw[18];
	struct mlx5e_priv *priv = arg1;
	int error;

	snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
	    fw_rev_sub(priv->mdev));
	error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
	return (error);
}
3005
3006 static void
3007 mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
3008 {
3009         int i;
3010
3011         for (i = 0; i < ch->num_tc; i++)
3012                 mlx5e_drain_sq(&ch->sq[i]);
3013 }
3014
/*
 * Post a single NOP through the SQ doorbell and then clear the
 * doorbell record.  Used to reset the doorbell state before the queue
 * is moved from RST to RDY (see mlx5e_resume_sq()).
 */
static void
mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq)
{

	sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP);
	sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8);
	mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
	sq->doorbell.d64 = 0;
}
3024
/*
 * Bring a previously stopped send queue back into service.
 *
 * The queue is walked through the SQ state machine, ERR -> RST -> RDY,
 * with the software ring counters and the doorbell record reset in
 * between.  Errors from mlx5e_modify_sq() are reported but recovery
 * proceeds regardless.
 */
void
mlx5e_resume_sq(struct mlx5e_sq *sq)
{
	int err;

	/* check if already enabled */
	if (sq->stopped == 0)
		return;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR,
	    MLX5_SQC_STATE_RST);
	if (err != 0) {
		if_printf(sq->ifp,
		    "mlx5e_modify_sq() from ERR to RST failed: %d\n", err);
	}

	/* reset producer/consumer indices */
	sq->cc = 0;
	sq->pc = 0;

	/* reset doorbell prior to moving from RST to RDY */
	mlx5e_reset_sq_doorbell_record(sq);

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST,
	    MLX5_SQC_STATE_RDY);
	if (err != 0) {
		if_printf(sq->ifp,
		    "mlx5e_modify_sq() from RST to RDY failed: %d\n", err);
	}

	/* mark the queue running under its lock */
	mtx_lock(&sq->lock);
	sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
	sq->stopped = 0;
	mtx_unlock(&sq->lock);

}
3060
3061 static void
3062 mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
3063 {
3064         int i;
3065
3066         for (i = 0; i < ch->num_tc; i++)
3067                 mlx5e_resume_sq(&ch->sq[i]);
3068 }
3069
3070 static void
3071 mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
3072 {
3073         struct mlx5e_rq *rq = &ch->rq;
3074         int err;
3075
3076         mtx_lock(&rq->mtx);
3077         rq->enabled = 0;
3078         callout_stop(&rq->watchdog);
3079         mtx_unlock(&rq->mtx);
3080
3081         callout_drain(&rq->watchdog);
3082
3083         err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
3084         if (err != 0) {
3085                 if_printf(rq->ifp,
3086                     "mlx5e_modify_rq() from RDY to RST failed: %d\n", err);
3087         }
3088
3089         while (!mlx5_wq_ll_is_empty(&rq->wq)) {
3090                 msleep(1);
3091                 rq->cq.mcq.comp(&rq->cq.mcq);
3092         }
3093
3094         /*
3095          * Transitioning into RST state will allow the FW to track less ERR state queues,
3096          * thus reducing the recv queue flushing time
3097          */
3098         err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST);
3099         if (err != 0) {
3100                 if_printf(rq->ifp,
3101                     "mlx5e_modify_rq() from ERR to RST failed: %d\n", err);
3102         }
3103 }
3104
/*
 * Restart receive DMA on a channel.
 *
 * Resets the receive WQ counter and its doorbell record, moves the RQ
 * from RST to RDY (errors are only reported), marks the queue enabled
 * and invokes the completion handler once so processing resumes.
 */
static void
mlx5e_enable_rx_dma(struct mlx5e_channel *ch)
{
	struct mlx5e_rq *rq = &ch->rq;
	int err;

	rq->wq.wqe_ctr = 0;
	mlx5_wq_ll_update_db_record(&rq->wq);
	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err != 0) {
		if_printf(rq->ifp,
		    "mlx5e_modify_rq() from RST to RDY failed: %d\n", err);
	}

	rq->enabled = 1;

	/* kick the CQ handler once to resume reception immediately */
	rq->cq.mcq.comp(&rq->cq.mcq);
}
3123
3124 void
3125 mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value)
3126 {
3127         int i;
3128
3129         if (priv->channel == NULL)
3130                 return;
3131
3132         for (i = 0; i < priv->params.num_channels; i++) {
3133
3134                 if (!priv->channel[i])
3135                         continue;
3136
3137                 if (value)
3138                         mlx5e_disable_tx_dma(priv->channel[i]);
3139                 else
3140                         mlx5e_enable_tx_dma(priv->channel[i]);
3141         }
3142 }
3143
3144 void
3145 mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value)
3146 {
3147         int i;
3148
3149         if (priv->channel == NULL)
3150                 return;
3151
3152         for (i = 0; i < priv->params.num_channels; i++) {
3153
3154                 if (!priv->channel[i])
3155                         continue;
3156
3157                 if (value)
3158                         mlx5e_disable_rx_dma(priv->channel[i]);
3159                 else
3160                         mlx5e_enable_rx_dma(priv->channel[i]);
3161         }
3162 }
3163
/*
 * Register the static hardware-information sysctls: the firmware
 * version (rendered on demand by sysctl_firmware()) and the board ID
 * string from the core device.
 */
static void
mlx5e_add_hw_stats(struct mlx5e_priv *priv)
{
	SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
	    sysctl_firmware, "A", "HCA firmware version");

	SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
	    OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
	    "Board ID");
}
3175
3176 static int
3177 mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3178 {
3179         struct mlx5e_priv *priv = arg1;
3180         uint32_t tx_pfc;
3181         uint32_t value;
3182         int error;
3183
3184         PRIV_LOCK(priv);
3185
3186         tx_pfc = priv->params.tx_priority_flow_control;
3187
3188         /* get current value */
3189         value = (tx_pfc >> arg2) & 1;
3190
3191         error = sysctl_handle_32(oidp, &value, 0, req);
3192
3193         /* range check value */
3194         if (value != 0)
3195                 priv->params.tx_priority_flow_control |= (1 << arg2);
3196         else
3197                 priv->params.tx_priority_flow_control &= ~(1 << arg2);
3198
3199         /* check if update is required */
3200         if (error == 0 && priv->gone == 0 &&
3201             tx_pfc != priv->params.tx_priority_flow_control) {
3202                 error = -mlx5e_set_port_pfc(priv);
3203                 /* restore previous value */
3204                 if (error != 0)
3205                         priv->params.tx_priority_flow_control= tx_pfc;
3206         }
3207         PRIV_UNLOCK(priv);
3208
3209         return (error);
3210 }
3211
3212 static int
3213 mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS)
3214 {
3215         struct mlx5e_priv *priv = arg1;
3216         uint32_t rx_pfc;
3217         uint32_t value;
3218         int error;
3219
3220         PRIV_LOCK(priv);
3221
3222         rx_pfc = priv->params.rx_priority_flow_control;
3223
3224         /* get current value */
3225         value = (rx_pfc >> arg2) & 1;
3226
3227         error = sysctl_handle_32(oidp, &value, 0, req);
3228
3229         /* range check value */
3230         if (value != 0)
3231                 priv->params.rx_priority_flow_control |= (1 << arg2);
3232         else
3233                 priv->params.rx_priority_flow_control &= ~(1 << arg2);
3234
3235         /* check if update is required */
3236         if (error == 0 && priv->gone == 0 &&
3237             rx_pfc != priv->params.rx_priority_flow_control) {
3238                 error = -mlx5e_set_port_pfc(priv);
3239                 /* restore previous value */
3240                 if (error != 0)
3241                         priv->params.rx_priority_flow_control= rx_pfc;
3242         }
3243         PRIV_UNLOCK(priv);
3244
3245         return (error);
3246 }
3247
3248 static void
3249 mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
3250 {
3251         unsigned int x;
3252         char path[96];
3253         int error;
3254
3255         /* Only receiving pauseframes is enabled by default */
3256         priv->params.tx_pauseframe_control = 0;
3257         priv->params.rx_pauseframe_control = 1;
3258
3259         /* disable ports flow control, PFC, by default */
3260         priv->params.tx_priority_flow_control = 0;
3261         priv->params.rx_priority_flow_control = 0;
3262
3263 #if (__FreeBSD_version < 1100000)
3264         /* compute path for sysctl */
3265         snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
3266             device_get_unit(priv->mdev->pdev->dev.bsddev));
3267
3268         /* try to fetch tunable, if any */
3269         TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);
3270
3271         /* compute path for sysctl */
3272         snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
3273             device_get_unit(priv->mdev->pdev->dev.bsddev));
3274
3275         /* try to fetch tunable, if any */
3276         TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
3277
3278         for (x = 0; x != 8; x++) {
3279
3280                 /* compute path for sysctl */
3281                 snprintf(path, sizeof(path), "dev.mce.%d.tx_priority_flow_control_%u",
3282                     device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3283
3284                 /* try to fetch tunable, if any */
3285                 if (TUNABLE_INT_FETCH(path, &value) == 0 && value != 0)
3286                         priv->params.tx_priority_flow_control |= 1 << x;
3287
3288                 /* compute path for sysctl */
3289                 snprintf(path, sizeof(path), "dev.mce.%d.rx_priority_flow_control_%u",
3290                     device_get_unit(priv->mdev->pdev->dev.bsddev), x);
3291
3292                 /* try to fetch tunable, if any */
3293                 if (TUNABLE_INT_FETCH(path, &value) == 0 && value != 0)
3294                         priv->params.rx_priority_flow_control |= 1 << x;
3295         }
3296 #endif
3297
3298         /* register pauseframe SYSCTLs */
3299         SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3300             OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
3301             &priv->params.tx_pauseframe_control, 0,
3302             "Set to enable TX pause frames. Clear to disable.");
3303
3304         SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3305             OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
3306             &priv->params.rx_pauseframe_control, 0,
3307             "Set to enable RX pause frames. Clear to disable.");
3308
3309         /* register priority_flow control, PFC, SYSCTLs */
3310         for (x = 0; x != 8; x++) {
3311                 snprintf(path, sizeof(path), "tx_priority_flow_control_%u", x);
3312
3313                 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3314                     OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3315                     CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_tx_priority_flow_control, "IU",
3316                     "Set to enable TX ports flow control frames for given priority. Clear to disable.");
3317
3318                 snprintf(path, sizeof(path), "rx_priority_flow_control_%u", x);
3319
3320                 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3321                     OID_AUTO, path, CTLTYPE_UINT | CTLFLAG_RWTUN |
3322                     CTLFLAG_MPSAFE, priv, x, &mlx5e_sysctl_rx_priority_flow_control, "IU",
3323                     "Set to enable RX ports flow control frames for given priority. Clear to disable.");
3324         }
3325
3326         PRIV_LOCK(priv);
3327
3328         /* range check */
3329         priv->params.tx_pauseframe_control =
3330             priv->params.tx_pauseframe_control ? 1 : 0;
3331         priv->params.rx_pauseframe_control =
3332             priv->params.rx_pauseframe_control ? 1 : 0;
3333
3334         /* update firmware */
3335         error = mlx5e_set_port_pause_and_pfc(priv);
3336         if (error == -EINVAL) {
3337                 if_printf(priv->ifp,
3338                     "Global pauseframes must be disabled before enabling PFC.\n");
3339                 priv->params.rx_priority_flow_control = 0;
3340                 priv->params.tx_priority_flow_control = 0;
3341
3342                 /* update firmware */
3343                 (void) mlx5e_set_port_pause_and_pfc(priv);
3344         }
3345         PRIV_UNLOCK(priv);
3346 }
3347
3348 static void *
3349 mlx5e_create_ifp(struct mlx5_core_dev *mdev)
3350 {
3351         struct ifnet *ifp;
3352         struct mlx5e_priv *priv;
3353         u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
3354         struct sysctl_oid_list *child;
3355         int ncv = mdev->priv.eq_table.num_comp_vectors;
3356         char unit[16];
3357         int err;
3358         int i;
3359         u32 eth_proto_cap;
3360
3361         if (mlx5e_check_required_hca_cap(mdev)) {
3362                 mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
3363                 return (NULL);
3364         }
3365         priv = malloc(sizeof(*priv), M_MLX5EN, M_WAITOK | M_ZERO);
3366         mlx5e_priv_mtx_init(priv);
3367
3368         ifp = priv->ifp = if_alloc(IFT_ETHER);
3369         if (ifp == NULL) {
3370                 mlx5_core_err(mdev, "if_alloc() failed\n");
3371                 goto err_free_priv;
3372         }
3373         ifp->if_softc = priv;
3374         if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
3375         ifp->if_mtu = ETHERMTU;
3376         ifp->if_init = mlx5e_open;
3377         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3378         ifp->if_ioctl = mlx5e_ioctl;
3379         ifp->if_transmit = mlx5e_xmit;
3380         ifp->if_qflush = if_qflush;
3381 #if (__FreeBSD_version >= 1100000)
3382         ifp->if_get_counter = mlx5e_get_counter;
3383 #endif
3384         ifp->if_snd.ifq_maxlen = ifqmaxlen;
3385         /*
3386          * Set driver features
3387          */
3388         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
3389         ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
3390         ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
3391         ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
3392         ifp->if_capabilities |= IFCAP_LRO;
3393         ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
3394         ifp->if_capabilities |= IFCAP_HWSTATS;
3395
3396         /* set TSO limits so that we don't have to drop TX packets */
3397         ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
3398         ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
3399         ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
3400
3401         ifp->if_capenable = ifp->if_capabilities;
3402         ifp->if_hwassist = 0;
3403         if (ifp->if_capenable & IFCAP_TSO)
3404                 ifp->if_hwassist |= CSUM_TSO;
3405         if (ifp->if_capenable & IFCAP_TXCSUM)
3406                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
3407         if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3408                 ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
3409
3410         /* ifnet sysctl tree */
3411         sysctl_ctx_init(&priv->sysctl_ctx);
3412         priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
3413             OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
3414         if (priv->sysctl_ifnet == NULL) {
3415                 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3416                 goto err_free_sysctl;
3417         }
3418         snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
3419         priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3420             OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
3421         if (priv->sysctl_ifnet == NULL) {
3422                 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3423                 goto err_free_sysctl;
3424         }
3425
3426         /* HW sysctl tree */
3427         child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
3428         priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
3429             OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
3430         if (priv->sysctl_hw == NULL) {
3431                 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
3432                 goto err_free_sysctl;
3433         }
3434         mlx5e_build_ifp_priv(mdev, priv, ncv);
3435
3436         snprintf(unit, sizeof(unit), "mce%u_wq",
3437             device_get_unit(mdev->pdev->dev.bsddev));
3438         priv->wq = alloc_workqueue(unit, 0, 1);
3439         if (priv->wq == NULL) {
3440                 if_printf(ifp, "%s: alloc_workqueue failed\n", __func__);
3441                 goto err_free_sysctl;
3442         }
3443
3444         err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
3445         if (err) {
3446                 if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
3447                     __func__, err);
3448                 goto err_free_wq;
3449         }
3450         err = mlx5_core_alloc_pd(mdev, &priv->pdn);
3451         if (err) {
3452                 if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
3453                     __func__, err);
3454                 goto err_unmap_free_uar;
3455         }
3456         err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
3457         if (err) {
3458                 if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
3459                     __func__, err);
3460                 goto err_dealloc_pd;
3461         }
3462         err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
3463         if (err) {
3464                 if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
3465                     __func__, err);
3466                 goto err_dealloc_transport_domain;
3467         }
3468         mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
3469
3470         /* check if we should generate a random MAC address */
3471         if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
3472             is_zero_ether_addr(dev_addr)) {
3473                 random_ether_addr(dev_addr);
3474                 if_printf(ifp, "Assigned random MAC address\n");
3475         }
3476
3477         /* set default MTU */
3478         mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
3479
3480         /* Set desc */
3481         device_set_desc(mdev->pdev->dev.bsddev, mlx5e_version);
3482
3483         /* Set default media status */
3484         priv->media_status_last = IFM_AVALID;
3485         priv->media_active_last = IFM_ETHER | IFM_AUTO |
3486             IFM_ETH_RXPAUSE | IFM_FDX;
3487
3488         /* setup default pauseframes configuration */
3489         mlx5e_setup_pauseframes(priv);
3490
3491         err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
3492         if (err) {
3493                 eth_proto_cap = 0;
3494                 if_printf(ifp, "%s: Query port media capability failed, %d\n",
3495                     __func__, err);
3496         }
3497
3498         /* Setup supported medias */
3499         ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
3500             mlx5e_media_change, mlx5e_media_status);
3501
3502         for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
3503                 if (mlx5e_mode_table[i].baudrate == 0)
3504                         continue;
3505                 if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
3506                         ifmedia_add(&priv->media,
3507                             mlx5e_mode_table[i].subtype |
3508                             IFM_ETHER, 0, NULL);
3509                         ifmedia_add(&priv->media,
3510                             mlx5e_mode_table[i].subtype |
3511                             IFM_ETHER | IFM_FDX |
3512                             IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3513                 }
3514         }
3515
3516         ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3517         ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3518             IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
3519
3520         /* Set autoselect by default */
3521         ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
3522             IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
3523         ether_ifattach(ifp, dev_addr);
3524
3525         /* Register for VLAN events */
3526         priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
3527             mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
3528         priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
3529             mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
3530
3531         /* Link is down by default */
3532         if_link_state_change(ifp, LINK_STATE_DOWN);
3533
3534         mlx5e_enable_async_events(priv);
3535
3536         mlx5e_add_hw_stats(priv);
3537
3538         mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3539             "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
3540             priv->stats.vport.arg);
3541
3542         mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
3543             "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
3544             priv->stats.pport.arg);
3545
3546         mlx5e_create_ethtool(priv);
3547
3548         mtx_lock(&priv->async_events_mtx);
3549         mlx5e_update_stats(priv);
3550         mtx_unlock(&priv->async_events_mtx);
3551
3552         return (priv);
3553
3554 err_dealloc_transport_domain:
3555         mlx5_dealloc_transport_domain(mdev, priv->tdn);
3556
3557 err_dealloc_pd:
3558         mlx5_core_dealloc_pd(mdev, priv->pdn);
3559
3560 err_unmap_free_uar:
3561         mlx5_unmap_free_uar(mdev, &priv->cq_uar);
3562
3563 err_free_wq:
3564         destroy_workqueue(priv->wq);
3565
3566 err_free_sysctl:
3567         sysctl_ctx_free(&priv->sysctl_ctx);
3568
3569         if_free(ifp);
3570
3571 err_free_priv:
3572         mlx5e_priv_mtx_destroy(priv);
3573         free(priv, M_MLX5EN);
3574         return (NULL);
3575 }
3576
/*
 * Teardown counterpart of mlx5e_create_ifp().  Invoked by the mlx5 core
 * through mlx5e_interface.remove when the ethernet protocol instance is
 * detached from the device.  Resources are released in roughly the
 * reverse order of their allocation; the statement ordering below is
 * significant and must not be rearranged casually.
 */
static void
mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;
        struct ifnet *ifp = priv->ifp;

        /* don't allow more IOCTLs */
        priv->gone = 1;

        /*
         * Clear the device description to avoid use after free,
         * because the bsddev is not destroyed when this module is
         * unloaded:
         */
        device_set_desc(mdev->pdev->dev.bsddev, NULL);

        /* XXX wait a bit to allow IOCTL handlers to complete */
        pause("W", hz);

        /* stop watchdog timer */
        callout_drain(&priv->watchdog);

        /* unhook the VLAN add/remove notifications registered at create */
        if (priv->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
        if (priv->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

        /* make sure device gets closed */
        PRIV_LOCK(priv);
        mlx5e_close_locked(ifp);
        PRIV_UNLOCK(priv);

        /* unregister device */
        ifmedia_removeall(&priv->media);
        ether_ifdetach(ifp);
        if_free(ifp);

        /* destroy all remaining sysctl nodes */
        if (priv->sysctl_debug)
                sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
        sysctl_ctx_free(&priv->stats.vport.ctx);
        sysctl_ctx_free(&priv->stats.pport.ctx);
        sysctl_ctx_free(&priv->sysctl_ctx);

        /* release HW objects in reverse order of allocation */
        mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
        mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
        mlx5e_disable_async_events(priv);
        destroy_workqueue(priv->wq);
        mlx5e_priv_mtx_destroy(priv);
        free(priv, M_MLX5EN);
}
3630
3631 static void *
3632 mlx5e_get_ifp(void *vpriv)
3633 {
3634         struct mlx5e_priv *priv = vpriv;
3635
3636         return (priv->ifp);
3637 }
3638
/*
 * Callback table registered with the mlx5 core.  The core invokes
 * .add/.remove when a device gains/loses the ethernet protocol, .event
 * for asynchronous firmware events, and .get_dev to obtain the ifnet.
 */
static struct mlx5_interface mlx5e_interface = {
        .add = mlx5e_create_ifp,
        .remove = mlx5e_destroy_ifp,
        .event = mlx5e_async_event,
        .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
        .get_dev = mlx5e_get_ifp,
};
3646
/*
 * Module load hook: register the ethernet interface table with the
 * mlx5 core so existing and future devices get an ethernet instance.
 */
void
mlx5e_init(void)
{
        mlx5_register_interface(&mlx5e_interface);
}
3652
/*
 * Module unload hook: unregister from the mlx5 core, which triggers
 * mlx5e_destroy_ifp() for every attached device.
 */
void
mlx5e_cleanup(void)
{
        mlx5_unregister_interface(&mlx5e_interface);
}
3658
/* Run init/cleanup after the mlx5 core itself (same SI_ORDER tier). */
module_init_order(mlx5e_init, SI_ORDER_THIRD);
module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);

/* linuxkpi dependency only exists on FreeBSD 11 and newer */
#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
#endif
MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
MODULE_VERSION(mlx5en, 1);