2 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/sockio.h>
31 #include <machine/atomic.h>
33 #define ETH_DRIVER_VERSION "3.1.0-dev"
34 char mlx5e_version[] = "Mellanox Ethernet driver"
35 " (" ETH_DRIVER_VERSION ")";
37 struct mlx5e_rq_param {
38 u32 rqc [MLX5_ST_SZ_DW(rqc)];
39 struct mlx5_wq_param wq;
42 struct mlx5e_sq_param {
43 u32 sqc [MLX5_ST_SZ_DW(sqc)];
44 struct mlx5_wq_param wq;
47 struct mlx5e_cq_param {
48 u32 cqc [MLX5_ST_SZ_DW(cqc)];
49 struct mlx5_wq_param wq;
53 struct mlx5e_channel_param {
54 struct mlx5e_rq_param rq;
55 struct mlx5e_sq_param sq;
56 struct mlx5e_cq_param rx_cq;
57 struct mlx5e_cq_param tx_cq;
63 } mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = {
65 [MLX5E_1000BASE_CX_SGMII] = {
66 .subtype = IFM_1000_CX_SGMII,
67 .baudrate = IF_Mbps(1000ULL),
69 [MLX5E_1000BASE_KX] = {
70 .subtype = IFM_1000_KX,
71 .baudrate = IF_Mbps(1000ULL),
73 [MLX5E_10GBASE_CX4] = {
74 .subtype = IFM_10G_CX4,
75 .baudrate = IF_Gbps(10ULL),
77 [MLX5E_10GBASE_KX4] = {
78 .subtype = IFM_10G_KX4,
79 .baudrate = IF_Gbps(10ULL),
81 [MLX5E_10GBASE_KR] = {
82 .subtype = IFM_10G_KR,
83 .baudrate = IF_Gbps(10ULL),
85 [MLX5E_20GBASE_KR2] = {
86 .subtype = IFM_20G_KR2,
87 .baudrate = IF_Gbps(20ULL),
89 [MLX5E_40GBASE_CR4] = {
90 .subtype = IFM_40G_CR4,
91 .baudrate = IF_Gbps(40ULL),
93 [MLX5E_40GBASE_KR4] = {
94 .subtype = IFM_40G_KR4,
95 .baudrate = IF_Gbps(40ULL),
97 [MLX5E_56GBASE_R4] = {
98 .subtype = IFM_56G_R4,
99 .baudrate = IF_Gbps(56ULL),
101 [MLX5E_10GBASE_CR] = {
102 .subtype = IFM_10G_CR1,
103 .baudrate = IF_Gbps(10ULL),
105 [MLX5E_10GBASE_SR] = {
106 .subtype = IFM_10G_SR,
107 .baudrate = IF_Gbps(10ULL),
109 [MLX5E_10GBASE_ER] = {
110 .subtype = IFM_10G_ER,
111 .baudrate = IF_Gbps(10ULL),
113 [MLX5E_40GBASE_SR4] = {
114 .subtype = IFM_40G_SR4,
115 .baudrate = IF_Gbps(40ULL),
117 [MLX5E_40GBASE_LR4] = {
118 .subtype = IFM_40G_LR4,
119 .baudrate = IF_Gbps(40ULL),
121 [MLX5E_100GBASE_CR4] = {
122 .subtype = IFM_100G_CR4,
123 .baudrate = IF_Gbps(100ULL),
125 [MLX5E_100GBASE_SR4] = {
126 .subtype = IFM_100G_SR4,
127 .baudrate = IF_Gbps(100ULL),
129 [MLX5E_100GBASE_KR4] = {
130 .subtype = IFM_100G_KR4,
131 .baudrate = IF_Gbps(100ULL),
133 [MLX5E_100GBASE_LR4] = {
134 .subtype = IFM_100G_LR4,
135 .baudrate = IF_Gbps(100ULL),
137 [MLX5E_100BASE_TX] = {
138 .subtype = IFM_100_TX,
139 .baudrate = IF_Mbps(100ULL),
141 [MLX5E_100BASE_T] = {
142 .subtype = IFM_100_T,
143 .baudrate = IF_Mbps(100ULL),
145 [MLX5E_10GBASE_T] = {
146 .subtype = IFM_10G_T,
147 .baudrate = IF_Gbps(10ULL),
149 [MLX5E_25GBASE_CR] = {
150 .subtype = IFM_25G_CR,
151 .baudrate = IF_Gbps(25ULL),
153 [MLX5E_25GBASE_KR] = {
154 .subtype = IFM_25G_KR,
155 .baudrate = IF_Gbps(25ULL),
157 [MLX5E_25GBASE_SR] = {
158 .subtype = IFM_25G_SR,
159 .baudrate = IF_Gbps(25ULL),
161 [MLX5E_50GBASE_CR2] = {
162 .subtype = IFM_50G_CR2,
163 .baudrate = IF_Gbps(50ULL),
165 [MLX5E_50GBASE_KR2] = {
166 .subtype = IFM_50G_KR2,
167 .baudrate = IF_Gbps(50ULL),
171 MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
174 mlx5e_update_carrier(struct mlx5e_priv *priv)
176 struct mlx5_core_dev *mdev = priv->mdev;
177 u32 out[MLX5_ST_SZ_DW(ptys_reg)];
183 port_state = mlx5_query_vport_state(mdev,
184 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
186 if (port_state == VPORT_STATE_UP) {
187 priv->media_status_last |= IFM_ACTIVE;
189 priv->media_status_last &= ~IFM_ACTIVE;
190 priv->media_active_last = IFM_ETHER;
191 if_link_state_change(priv->ifp, LINK_STATE_DOWN);
195 error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN);
197 priv->media_active_last = IFM_ETHER;
198 priv->ifp->if_baudrate = 1;
199 if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n",
203 eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
205 for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
206 if (mlx5e_mode_table[i].baudrate == 0)
208 if (MLX5E_PROT_MASK(i) & eth_proto_oper) {
209 priv->ifp->if_baudrate =
210 mlx5e_mode_table[i].baudrate;
211 priv->media_active_last =
212 mlx5e_mode_table[i].subtype | IFM_ETHER | IFM_FDX;
215 if_link_state_change(priv->ifp, LINK_STATE_UP);
219 mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
221 struct mlx5e_priv *priv = dev->if_softc;
223 ifmr->ifm_status = priv->media_status_last;
224 ifmr->ifm_active = priv->media_active_last |
225 (priv->params_ethtool.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
226 (priv->params_ethtool.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
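/*
 * mlx5e_find_link_mode() translates an ifmedia subtype into a PTYS
 * protocol bitmask by scanning "mlx5e_mode_table" and OR'ing in
 * MLX5E_PROT_MASK() for every matching entry.
 */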
231 mlx5e_find_link_mode(u32 subtype)
236 for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
237 if (mlx5e_mode_table[i].baudrate == 0)
239 if (mlx5e_mode_table[i].subtype == subtype)
240 link_mode |= MLX5E_PROT_MASK(i);
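/*
 * mlx5e_media_change() applies a media selection from ifconfig(8):
 * the requested subtype is converted into a protocol mask, checked
 * against the port capabilities and then programmed while the port
 * is temporarily taken down.
 */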
247 mlx5e_media_change(struct ifnet *dev)
249 struct mlx5e_priv *priv = dev->if_softc;
250 struct mlx5_core_dev *mdev = priv->mdev;
256 locked = PRIV_LOCKED(priv);
260 if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
264 link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));
266 error = mlx5_query_port_proto_cap(mdev, ð_proto_cap, MLX5_PTYS_EN);
268 if_printf(dev, "Query port media capability failed\n");
271 if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO)
272 link_mode = eth_proto_cap;
274 link_mode = link_mode & eth_proto_cap;
if_printf(dev, "Unsupported link mode requested\n");
281 mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
282 mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
283 mlx5_set_port_status(mdev, MLX5_PORT_UP);
292 mlx5e_update_carrier_work(struct work_struct *work)
294 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
295 update_carrier_work);
298 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
299 mlx5e_update_carrier(priv);
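/*
 * mlx5e_update_pport_counters() reads the PPCNT register once per
 * counter group (IEEE 802.3, RFC 2819, RFC 2863 and physical layer)
 * and copies the big-endian 64-bit counters into the per-port
 * statistics arrays.
 */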
304 mlx5e_update_pport_counters(struct mlx5e_priv *priv)
306 struct mlx5_core_dev *mdev = priv->mdev;
307 struct mlx5e_pport_stats *s = &priv->stats.pport;
308 struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
312 unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
316 in = mlx5_vzalloc(sz);
317 out = mlx5_vzalloc(sz);
318 if (in == NULL || out == NULL)
321 ptr = (uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
323 MLX5_SET(ppcnt_reg, in, local_port, 1);
325 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
326 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
327 for (x = y = 0; x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
328 s->arg[y] = be64toh(ptr[x]);
330 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
331 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
332 for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
333 s->arg[y] = be64toh(ptr[x]);
334 for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
335 MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
336 s_debug->arg[y] = be64toh(ptr[x]);
338 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
339 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
340 for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
341 s_debug->arg[y] = be64toh(ptr[x]);
343 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
344 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
345 for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
346 s_debug->arg[y] = be64toh(ptr[x]);
353 mlx5e_update_stats_work(struct work_struct *work)
355 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
357 struct mlx5_core_dev *mdev = priv->mdev;
358 struct mlx5e_vport_stats *s = &priv->stats.vport;
359 struct mlx5e_rq_stats *rq_stats;
360 struct mlx5e_sq_stats *sq_stats;
361 struct buf_ring *sq_br;
362 #if (__FreeBSD_version < 1100000)
363 struct ifnet *ifp = priv->ifp;
366 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
368 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
371 u64 tx_queue_dropped = 0;
372 u64 tx_defragged = 0;
373 u64 tx_offload_none = 0;
376 u64 sw_lro_queued = 0;
377 u64 sw_lro_flushed = 0;
378 u64 rx_csum_none = 0;
380 u32 rx_out_of_buffer = 0;
385 out = mlx5_vzalloc(outlen);
388 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
/* Collect the SW counters first and then the HW counters, for consistency */
392 for (i = 0; i < priv->params.num_channels; i++) {
393 struct mlx5e_rq *rq = &priv->channel[i]->rq;
395 rq_stats = &priv->channel[i]->rq.stats;
397 /* collect stats from LRO */
398 rq_stats->sw_lro_queued = rq->lro.lro_queued;
399 rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
400 sw_lro_queued += rq_stats->sw_lro_queued;
401 sw_lro_flushed += rq_stats->sw_lro_flushed;
402 lro_packets += rq_stats->lro_packets;
403 lro_bytes += rq_stats->lro_bytes;
404 rx_csum_none += rq_stats->csum_none;
405 rx_wqe_err += rq_stats->wqe_err;
407 for (j = 0; j < priv->num_tc; j++) {
408 sq_stats = &priv->channel[i]->sq[j].stats;
409 sq_br = priv->channel[i]->sq[j].br;
411 tso_packets += sq_stats->tso_packets;
412 tso_bytes += sq_stats->tso_bytes;
413 tx_queue_dropped += sq_stats->dropped;
414 tx_queue_dropped += sq_br->br_drops;
415 tx_defragged += sq_stats->defragged;
416 tx_offload_none += sq_stats->csum_offload_none;
420 /* update counters */
421 s->tso_packets = tso_packets;
422 s->tso_bytes = tso_bytes;
423 s->tx_queue_dropped = tx_queue_dropped;
424 s->tx_defragged = tx_defragged;
425 s->lro_packets = lro_packets;
426 s->lro_bytes = lro_bytes;
427 s->sw_lro_queued = sw_lro_queued;
428 s->sw_lro_flushed = sw_lro_flushed;
429 s->rx_csum_none = rx_csum_none;
430 s->rx_wqe_err = rx_wqe_err;
433 memset(in, 0, sizeof(in));
435 MLX5_SET(query_vport_counter_in, in, opcode,
436 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
437 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
438 MLX5_SET(query_vport_counter_in, in, other_vport, 0);
440 memset(out, 0, outlen);
442 /* get number of out-of-buffer drops first */
443 if (mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
447 /* accumulate difference into a 64-bit counter */
448 s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev);
449 s->rx_out_of_buffer_prev = rx_out_of_buffer;
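/*
 * The out-of-buffer queue counter is only 32 bits wide, so the
 * difference is truncated to 32 bits before being widened and
 * accumulated, which makes counter wraparound harmless. For example,
 * prev = 0xFFFFFFF0 and cur = 0x00000010 gives a delta of 0x20.
 */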
451 /* get port statistics */
452 if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
455 #define MLX5_GET_CTR(out, x) \
456 MLX5_GET64(query_vport_counter_out, out, x)
458 s->rx_error_packets =
459 MLX5_GET_CTR(out, received_errors.packets);
461 MLX5_GET_CTR(out, received_errors.octets);
462 s->tx_error_packets =
463 MLX5_GET_CTR(out, transmit_errors.packets);
465 MLX5_GET_CTR(out, transmit_errors.octets);
467 s->rx_unicast_packets =
468 MLX5_GET_CTR(out, received_eth_unicast.packets);
469 s->rx_unicast_bytes =
470 MLX5_GET_CTR(out, received_eth_unicast.octets);
471 s->tx_unicast_packets =
472 MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
473 s->tx_unicast_bytes =
474 MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
476 s->rx_multicast_packets =
477 MLX5_GET_CTR(out, received_eth_multicast.packets);
478 s->rx_multicast_bytes =
479 MLX5_GET_CTR(out, received_eth_multicast.octets);
480 s->tx_multicast_packets =
481 MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
482 s->tx_multicast_bytes =
483 MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
485 s->rx_broadcast_packets =
486 MLX5_GET_CTR(out, received_eth_broadcast.packets);
487 s->rx_broadcast_bytes =
488 MLX5_GET_CTR(out, received_eth_broadcast.octets);
489 s->tx_broadcast_packets =
490 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
491 s->tx_broadcast_bytes =
492 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
495 s->rx_unicast_packets +
496 s->rx_multicast_packets +
497 s->rx_broadcast_packets -
500 s->rx_unicast_bytes +
501 s->rx_multicast_bytes +
502 s->rx_broadcast_bytes;
504 s->tx_unicast_packets +
505 s->tx_multicast_packets +
506 s->tx_broadcast_packets;
508 s->tx_unicast_bytes +
509 s->tx_multicast_bytes +
510 s->tx_broadcast_bytes;
512 /* Update calculated offload counters */
513 s->tx_csum_offload = s->tx_packets - tx_offload_none;
514 s->rx_csum_good = s->rx_packets - s->rx_csum_none;
516 /* Update per port counters */
517 mlx5e_update_pport_counters(priv);
519 #if (__FreeBSD_version < 1100000)
/* no get_counters interface in FreeBSD 10 */
521 ifp->if_ipackets = s->rx_packets;
522 ifp->if_ierrors = s->rx_error_packets;
523 ifp->if_iqdrops = s->rx_out_of_buffer;
524 ifp->if_opackets = s->tx_packets;
525 ifp->if_oerrors = s->tx_error_packets;
526 ifp->if_snd.ifq_drops = s->tx_queue_dropped;
527 ifp->if_ibytes = s->rx_bytes;
528 ifp->if_obytes = s->tx_bytes;
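/*
 * mlx5e_update_stats() runs from the watchdog callout: it queues the
 * statistics work on the workqueue and re-arms itself to fire again
 * one second (hz ticks) later.
 */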
537 mlx5e_update_stats(void *arg)
539 struct mlx5e_priv *priv = arg;
541 schedule_work(&priv->update_stats_work);
543 callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
547 mlx5e_async_event_sub(struct mlx5e_priv *priv,
548 enum mlx5_dev_event event)
551 case MLX5_DEV_EVENT_PORT_UP:
552 case MLX5_DEV_EVENT_PORT_DOWN:
553 schedule_work(&priv->update_carrier_work);
562 mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
563 enum mlx5_dev_event event, unsigned long param)
565 struct mlx5e_priv *priv = vpriv;
567 mtx_lock(&priv->async_events_mtx);
568 if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
569 mlx5e_async_event_sub(priv, event);
570 mtx_unlock(&priv->async_events_mtx);
574 mlx5e_enable_async_events(struct mlx5e_priv *priv)
576 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
580 mlx5e_disable_async_events(struct mlx5e_priv *priv)
582 mtx_lock(&priv->async_events_mtx);
583 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
584 mtx_unlock(&priv->async_events_mtx);
587 static const char *mlx5e_rq_stats_desc[] = {
588 MLX5E_RQ_STATS(MLX5E_STATS_DESC)
592 mlx5e_create_rq(struct mlx5e_channel *c,
593 struct mlx5e_rq_param *param,
596 struct mlx5e_priv *priv = c->priv;
597 struct mlx5_core_dev *mdev = priv->mdev;
599 void *rqc = param->rqc;
600 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
605 /* Create DMA descriptor TAG */
606 if ((err = -bus_dma_tag_create(
607 bus_get_dma_tag(mdev->pdev->dev.bsddev),
608 1, /* any alignment */
610 BUS_SPACE_MAXADDR, /* lowaddr */
611 BUS_SPACE_MAXADDR, /* highaddr */
612 NULL, NULL, /* filter, filterarg */
613 MJUM16BYTES, /* maxsize */
615 MJUM16BYTES, /* maxsegsize */
617 NULL, NULL, /* lockfunc, lockfuncarg */
621 err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq,
624 goto err_free_dma_tag;
626 rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
628 if (priv->params.hw_lro_en) {
629 rq->wqe_sz = priv->params.lro_wqe_sz;
631 rq->wqe_sz = MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
633 if (rq->wqe_sz > MJUM16BYTES) {
635 goto err_rq_wq_destroy;
636 } else if (rq->wqe_sz > MJUM9BYTES) {
637 rq->wqe_sz = MJUM16BYTES;
638 } else if (rq->wqe_sz > MJUMPAGESIZE) {
639 rq->wqe_sz = MJUM9BYTES;
640 } else if (rq->wqe_sz > MCLBYTES) {
641 rq->wqe_sz = MJUMPAGESIZE;
643 rq->wqe_sz = MCLBYTES;
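/*
 * NOTE: the chain above rounds the receive WQE size up to the
 * smallest mbuf cluster size that can hold it (MCLBYTES,
 * MJUMPAGESIZE, MJUM9BYTES or MJUM16BYTES); sizes larger than
 * MJUM16BYTES are rejected.
 */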
646 wq_sz = mlx5_wq_ll_get_size(&rq->wq);
647 rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
648 if (rq->mbuf == NULL) {
650 goto err_rq_wq_destroy;
652 for (i = 0; i != wq_sz; i++) {
653 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
654 uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
656 err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
659 bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
660 goto err_rq_mbuf_free;
662 wqe->data.lkey = c->mkey_be;
663 wqe->data.byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
671 snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
672 mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
673 buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
676 #ifdef HAVE_TURBO_LRO
677 if (tcp_tlro_init(&rq->lro, c->ifp, MLX5E_BUDGET_MAX) != 0)
680 if (tcp_lro_init(&rq->lro))
683 rq->lro.ifp = c->ifp;
688 free(rq->mbuf, M_MLX5EN);
690 mlx5_wq_destroy(&rq->wq_ctrl);
692 bus_dma_tag_destroy(rq->dma_tag);
698 mlx5e_destroy_rq(struct mlx5e_rq *rq)
703 /* destroy all sysctl nodes */
704 sysctl_ctx_free(&rq->stats.ctx);
706 /* free leftover LRO packets, if any */
707 #ifdef HAVE_TURBO_LRO
708 tcp_tlro_free(&rq->lro);
710 tcp_lro_free(&rq->lro);
712 wq_sz = mlx5_wq_ll_get_size(&rq->wq);
713 for (i = 0; i != wq_sz; i++) {
714 if (rq->mbuf[i].mbuf != NULL) {
715 bus_dmamap_unload(rq->dma_tag,
716 rq->mbuf[i].dma_map);
717 m_freem(rq->mbuf[i].mbuf);
719 bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
721 free(rq->mbuf, M_MLX5EN);
722 mlx5_wq_destroy(&rq->wq_ctrl);
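/*
 * mlx5e_enable_rq() builds the CREATE_RQ firmware command: the RQ
 * context from the channel parameters is copied in, the CQ number
 * and doorbell are wired up, and the physical addresses of the work
 * queue pages are appended before the command is executed.
 */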
726 mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
728 struct mlx5e_channel *c = rq->channel;
729 struct mlx5e_priv *priv = c->priv;
730 struct mlx5_core_dev *mdev = priv->mdev;
738 inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
739 sizeof(u64) * rq->wq_ctrl.buf.npages;
740 in = mlx5_vzalloc(inlen);
744 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
745 wq = MLX5_ADDR_OF(rqc, rqc, wq);
747 memcpy(rqc, param->rqc, sizeof(param->rqc));
749 MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
750 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
751 MLX5_SET(rqc, rqc, flush_in_error_en, 1);
752 if (priv->counter_set_id >= 0)
753 MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
754 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
756 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
758 mlx5_fill_page_array(&rq->wq_ctrl.buf,
759 (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
761 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
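/*
 * mlx5e_modify_rq() issues MODIFY_RQ to move the receive queue
 * between states, e.g. RST -> RDY when opening and RDY -> ERR when
 * closing.
 */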
769 mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
771 struct mlx5e_channel *c = rq->channel;
772 struct mlx5e_priv *priv = c->priv;
773 struct mlx5_core_dev *mdev = priv->mdev;
780 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
781 in = mlx5_vzalloc(inlen);
785 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
787 MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
788 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
789 MLX5_SET(rqc, rqc, state, next_state);
791 err = mlx5_core_modify_rq(mdev, in, inlen);
799 mlx5e_disable_rq(struct mlx5e_rq *rq)
801 struct mlx5e_channel *c = rq->channel;
802 struct mlx5e_priv *priv = c->priv;
803 struct mlx5_core_dev *mdev = priv->mdev;
805 mlx5_core_destroy_rq(mdev, rq->rqn);
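/*
 * mlx5e_wait_for_min_rx_wqes() polls until the RQ has been refilled
 * with at least "min_rx_wqes" receive WQEs, giving up after a
 * bounded number of iterations so that bring-up cannot hang.
 */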
809 mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
811 struct mlx5e_channel *c = rq->channel;
812 struct mlx5e_priv *priv = c->priv;
813 struct mlx5_wq_ll *wq = &rq->wq;
816 for (i = 0; i < 1000; i++) {
817 if (wq->cur_sz >= priv->params.min_rx_wqes)
826 mlx5e_open_rq(struct mlx5e_channel *c,
827 struct mlx5e_rq_param *param,
833 err = mlx5e_create_rq(c, param, rq);
837 err = mlx5e_enable_rq(rq, param);
841 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
848 * Test send queues, which will trigger
849 * "mlx5e_post_rx_wqes()":
851 for (i = 0; i != c->num_tc; i++)
852 mlx5e_send_nop(&c->sq[i], 1, true);
856 mlx5e_disable_rq(rq);
858 mlx5e_destroy_rq(rq);
864 mlx5e_close_rq(struct mlx5e_rq *rq)
867 mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
871 mlx5e_close_rq_wait(struct mlx5e_rq *rq)
873 /* wait till RQ is empty */
874 while (!mlx5_wq_ll_is_empty(&rq->wq)) {
876 rq->cq.mcq.comp(&rq->cq.mcq);
879 mlx5e_disable_rq(rq);
880 mlx5e_destroy_rq(rq);
884 mlx5e_free_sq_db(struct mlx5e_sq *sq)
886 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
889 for (x = 0; x != wq_sz; x++)
890 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
891 free(sq->mbuf, M_MLX5EN);
895 mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
897 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
901 sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
902 if (sq->mbuf == NULL)
905 /* Create DMA descriptor MAPs */
906 for (x = 0; x != wq_sz; x++) {
907 err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
910 bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
911 free(sq->mbuf, M_MLX5EN);
918 static const char *mlx5e_sq_stats_desc[] = {
919 MLX5E_SQ_STATS(MLX5E_STATS_DESC)
923 mlx5e_create_sq(struct mlx5e_channel *c,
925 struct mlx5e_sq_param *param,
928 struct mlx5e_priv *priv = c->priv;
929 struct mlx5_core_dev *mdev = priv->mdev;
932 void *sqc = param->sqc;
933 void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
940 /* Create DMA descriptor TAG */
941 if ((err = -bus_dma_tag_create(
942 bus_get_dma_tag(mdev->pdev->dev.bsddev),
943 1, /* any alignment */
945 BUS_SPACE_MAXADDR, /* lowaddr */
946 BUS_SPACE_MAXADDR, /* highaddr */
947 NULL, NULL, /* filter, filterarg */
948 MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */
949 MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */
950 MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */
952 NULL, NULL, /* lockfunc, lockfuncarg */
956 err = mlx5_alloc_map_uar(mdev, &sq->uar);
958 goto err_free_dma_tag;
960 err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq,
963 goto err_unmap_free_uar;
965 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
966 sq->uar_map = sq->uar.map;
967 sq->uar_bf_map = sq->uar.bf_map;
968 sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
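/*
 * Only half of the BlueFlame register is used per doorbell write;
 * the transmit path is expected to alternate between the two halves.
 */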
970 err = mlx5e_alloc_sq_db(sq);
972 goto err_sq_wq_destroy;
975 sq->mkey_be = c->mkey_be;
979 sq->br = buf_ring_alloc(MLX5E_SQ_TX_QUEUE_SIZE, M_MLX5EN,
980 M_WAITOK, &sq->lock);
981 if (sq->br == NULL) {
982 if_printf(c->ifp, "%s: Failed allocating sq drbr buffer\n",
988 sq->sq_tq = taskqueue_create_fast("mlx5e_que", M_WAITOK,
989 taskqueue_thread_enqueue, &sq->sq_tq);
990 if (sq->sq_tq == NULL) {
991 if_printf(c->ifp, "%s: Failed allocating taskqueue\n",
997 TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
999 cpu_id = rss_getcpu(c->ix % rss_getnumbuckets());
1000 CPU_SETOF(cpu_id, &cpu_mask);
1001 taskqueue_start_threads_cpuset(&sq->sq_tq, 1, PI_NET, &cpu_mask,
1002 "%s TX SQ%d.%d CPU%d", c->ifp->if_xname, c->ix, tc, cpu_id);
1004 taskqueue_start_threads(&sq->sq_tq, 1, PI_NET,
1005 "%s TX SQ%d.%d", c->ifp->if_xname, c->ix, tc);
1007 snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
1008 mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
1009 buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
1015 buf_ring_free(sq->br, M_MLX5EN);
1017 mlx5e_free_sq_db(sq);
1019 mlx5_wq_destroy(&sq->wq_ctrl);
1022 mlx5_unmap_free_uar(mdev, &sq->uar);
1025 bus_dma_tag_destroy(sq->dma_tag);
1031 mlx5e_destroy_sq(struct mlx5e_sq *sq)
1033 struct mlx5e_channel *c = sq->channel;
1034 struct mlx5e_priv *priv = c->priv;
1036 /* destroy all sysctl nodes */
1037 sysctl_ctx_free(&sq->stats.ctx);
1039 mlx5e_free_sq_db(sq);
1040 mlx5_wq_destroy(&sq->wq_ctrl);
1041 mlx5_unmap_free_uar(priv->mdev, &sq->uar);
1042 taskqueue_drain(sq->sq_tq, &sq->sq_task);
1043 taskqueue_free(sq->sq_tq);
1044 buf_ring_free(sq->br, M_MLX5EN);
1048 mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
1050 struct mlx5e_channel *c = sq->channel;
1051 struct mlx5e_priv *priv = c->priv;
1052 struct mlx5_core_dev *mdev = priv->mdev;
1060 inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1061 sizeof(u64) * sq->wq_ctrl.buf.npages;
1062 in = mlx5_vzalloc(inlen);
1066 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1067 wq = MLX5_ADDR_OF(sqc, sqc, wq);
1069 memcpy(sqc, param->sqc, sizeof(param->sqc));
1071 MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
1072 MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
1073 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1074 MLX5_SET(sqc, sqc, tis_lst_sz, 1);
1075 MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1077 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1078 MLX5_SET(wq, wq, uar_page, sq->uar.index);
1079 MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
1081 MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
1083 mlx5_fill_page_array(&sq->wq_ctrl.buf,
1084 (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
1086 err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
1094 mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
1096 struct mlx5e_channel *c = sq->channel;
1097 struct mlx5e_priv *priv = c->priv;
1098 struct mlx5_core_dev *mdev = priv->mdev;
1105 inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1106 in = mlx5_vzalloc(inlen);
1110 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1112 MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
1113 MLX5_SET(modify_sq_in, in, sq_state, curr_state);
1114 MLX5_SET(sqc, sqc, state, next_state);
1116 err = mlx5_core_modify_sq(mdev, in, inlen);
1124 mlx5e_disable_sq(struct mlx5e_sq *sq)
1126 struct mlx5e_channel *c = sq->channel;
1127 struct mlx5e_priv *priv = c->priv;
1128 struct mlx5_core_dev *mdev = priv->mdev;
1130 mlx5_core_destroy_sq(mdev, sq->sqn);
1134 mlx5e_open_sq(struct mlx5e_channel *c,
1136 struct mlx5e_sq_param *param,
1137 struct mlx5e_sq *sq)
1141 err = mlx5e_create_sq(c, tc, param, sq);
1145 err = mlx5e_enable_sq(sq, param);
1147 goto err_destroy_sq;
1149 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
1151 goto err_disable_sq;
1153 atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_READY);
1158 mlx5e_disable_sq(sq);
1160 mlx5e_destroy_sq(sq);
1166 mlx5e_close_sq(struct mlx5e_sq *sq)
1169 /* ensure hw is notified of all pending wqes */
1170 if (mlx5e_sq_has_room_for(sq, 1))
1171 mlx5e_send_nop(sq, 1, true);
1173 mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
1177 mlx5e_close_sq_wait(struct mlx5e_sq *sq)
1179 /* wait till SQ is empty */
1180 while (sq->cc != sq->pc) {
1182 sq->cq.mcq.comp(&sq->cq.mcq);
1185 mlx5e_disable_sq(sq);
1186 mlx5e_destroy_sq(sq);
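/*
 * mlx5e_create_cq() allocates the completion queue work queue,
 * points the set_ci/arm doorbell records into the allocated doorbell
 * page and registers the completion and error event handlers used by
 * the RX and TX paths.
 */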
1190 mlx5e_create_cq(struct mlx5e_channel *c,
1191 struct mlx5e_cq_param *param,
1192 struct mlx5e_cq *cq,
1193 mlx5e_cq_comp_t *comp)
1195 struct mlx5e_priv *priv = c->priv;
1196 struct mlx5_core_dev *mdev = priv->mdev;
1197 struct mlx5_core_cq *mcq = &cq->mcq;
1203 param->wq.buf_numa_node = 0;
1204 param->wq.db_numa_node = 0;
1205 param->eq_ix = c->ix;
1207 err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq,
1212 mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1215 mcq->set_ci_db = cq->wq_ctrl.db.db;
1216 mcq->arm_db = cq->wq_ctrl.db.db + 1;
1217 *mcq->set_ci_db = 0;
1219 mcq->vector = param->eq_ix;
1221 mcq->event = mlx5e_cq_error_event;
1223 mcq->uar = &priv->cq_uar;
1225 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1226 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1237 mlx5e_destroy_cq(struct mlx5e_cq *cq)
1239 mlx5_wq_destroy(&cq->wq_ctrl);
1243 mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param,
1246 struct mlx5e_channel *c = cq->channel;
1247 struct mlx5e_priv *priv = c->priv;
1248 struct mlx5_core_dev *mdev = priv->mdev;
1249 struct mlx5_core_cq *mcq = &cq->mcq;
1257 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1258 sizeof(u64) * cq->wq_ctrl.buf.npages;
1259 in = mlx5_vzalloc(inlen);
1263 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1265 memcpy(cqc, param->cqc, sizeof(param->cqc));
1267 mlx5_fill_page_array(&cq->wq_ctrl.buf,
1268 (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));
1270 mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1272 MLX5_SET(cqc, cqc, cq_period_mode, moderation_mode);
1273 MLX5_SET(cqc, cqc, c_eqn, eqn);
1274 MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
1275 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1277 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1279 err = mlx5_core_create_cq(mdev, mcq, in, inlen);
1292 mlx5e_disable_cq(struct mlx5e_cq *cq)
1294 struct mlx5e_channel *c = cq->channel;
1295 struct mlx5e_priv *priv = c->priv;
1296 struct mlx5_core_dev *mdev = priv->mdev;
1298 mlx5_core_destroy_cq(mdev, &cq->mcq);
1302 mlx5e_open_cq(struct mlx5e_channel *c,
1303 struct mlx5e_cq_param *param,
1304 struct mlx5e_cq *cq,
1305 mlx5e_cq_comp_t *comp,
1310 err = mlx5e_create_cq(c, param, cq, comp);
1314 err = mlx5e_enable_cq(cq, param, moderation_mode);
1316 goto err_destroy_cq;
1321 mlx5e_destroy_cq(cq);
1327 mlx5e_close_cq(struct mlx5e_cq *cq)
1329 mlx5e_disable_cq(cq);
1330 mlx5e_destroy_cq(cq);
1334 mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1335 struct mlx5e_channel_param *cparam)
1337 u8 tx_moderation_mode;
1341 switch (c->priv->params.tx_cq_moderation_mode) {
1343 tx_moderation_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1346 if (MLX5_CAP_GEN(c->priv->mdev, cq_period_start_from_cqe))
1347 tx_moderation_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1349 tx_moderation_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1352 for (tc = 0; tc < c->num_tc; tc++) {
1353 /* open completion queue */
1354 err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
1355 &mlx5e_tx_cq_comp, tx_moderation_mode);
1357 goto err_close_tx_cqs;
1362 for (tc--; tc >= 0; tc--)
1363 mlx5e_close_cq(&c->sq[tc].cq);
1369 mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1373 for (tc = 0; tc < c->num_tc; tc++)
1374 mlx5e_close_cq(&c->sq[tc].cq);
1378 mlx5e_open_sqs(struct mlx5e_channel *c,
1379 struct mlx5e_channel_param *cparam)
1384 for (tc = 0; tc < c->num_tc; tc++) {
1385 err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
1393 for (tc--; tc >= 0; tc--) {
1394 mlx5e_close_sq(&c->sq[tc]);
1395 mlx5e_close_sq_wait(&c->sq[tc]);
1402 mlx5e_close_sqs(struct mlx5e_channel *c)
1406 for (tc = 0; tc < c->num_tc; tc++)
1407 mlx5e_close_sq(&c->sq[tc]);
1411 mlx5e_close_sqs_wait(struct mlx5e_channel *c)
1415 for (tc = 0; tc < c->num_tc; tc++)
1416 mlx5e_close_sq_wait(&c->sq[tc]);
1420 mlx5e_chan_mtx_init(struct mlx5e_channel *c)
1424 mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
1426 for (tc = 0; tc < c->num_tc; tc++) {
1427 mtx_init(&c->sq[tc].lock, "mlx5tx", MTX_NETWORK_LOCK, MTX_DEF);
1428 mtx_init(&c->sq[tc].comp_lock, "mlx5comp", MTX_NETWORK_LOCK,
1434 mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
1438 mtx_destroy(&c->rq.mtx);
1440 for (tc = 0; tc < c->num_tc; tc++) {
1441 mtx_destroy(&c->sq[tc].lock);
1442 mtx_destroy(&c->sq[tc].comp_lock);
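/*
 * mlx5e_open_channel() creates one RX/TX channel: the per-channel
 * mutexes are initialized first, then the TX completion queues, the
 * RX completion queue, the send queues and finally the receive
 * queue; the error paths unwind in the opposite order.
 */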
1447 mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1448 struct mlx5e_channel_param *cparam,
1449 struct mlx5e_channel *volatile *cp)
1451 struct mlx5e_channel *c;
1452 u8 rx_moderation_mode;
1455 c = malloc(sizeof(*c), M_MLX5EN, M_WAITOK | M_ZERO);
1462 c->pdev = &priv->mdev->pdev->dev;
1464 c->mkey_be = cpu_to_be32(priv->mr.key);
1465 c->num_tc = priv->num_tc;
1468 mlx5e_chan_mtx_init(c);
1470 /* open transmit completion queue */
1471 err = mlx5e_open_tx_cqs(c, cparam);
1475 switch (priv->params.rx_cq_moderation_mode) {
1477 rx_moderation_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1480 if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
1481 rx_moderation_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
1483 rx_moderation_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1487 /* open receive completion queue */
1488 err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
1489 &mlx5e_rx_cq_comp, rx_moderation_mode);
1491 goto err_close_tx_cqs;
1493 err = mlx5e_open_sqs(c, cparam);
1495 goto err_close_rx_cq;
1497 err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
1501 /* store channel pointer */
1504 /* poll receive queue initially */
1505 c->rq.cq.mcq.comp(&c->rq.cq.mcq);
1511 mlx5e_close_sqs_wait(c);
1514 mlx5e_close_cq(&c->rq.cq);
1517 mlx5e_close_tx_cqs(c);
1520 /* destroy mutexes */
1521 mlx5e_chan_mtx_destroy(c);
1527 mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
1529 struct mlx5e_channel *c = *pp;
1531 /* check if channel is already closed */
1534 mlx5e_close_rq(&c->rq);
1539 mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp)
1541 struct mlx5e_channel *c = *pp;
1543 /* check if channel is already closed */
1546 /* ensure channel pointer is no longer used */
1549 mlx5e_close_rq_wait(&c->rq);
1550 mlx5e_close_sqs_wait(c);
1551 mlx5e_close_cq(&c->rq.cq);
1552 mlx5e_close_tx_cqs(c);
1553 /* destroy mutexes */
1554 mlx5e_chan_mtx_destroy(c);
1559 mlx5e_build_rq_param(struct mlx5e_priv *priv,
1560 struct mlx5e_rq_param *param)
1562 void *rqc = param->rqc;
1563 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1565 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1566 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1567 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
1568 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
1569 MLX5_SET(wq, wq, pd, priv->pdn);
1571 param->wq.buf_numa_node = 0;
1572 param->wq.db_numa_node = 0;
1573 param->wq.linear = 1;
1577 mlx5e_build_sq_param(struct mlx5e_priv *priv,
1578 struct mlx5e_sq_param *param)
1580 void *sqc = param->sqc;
1581 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1583 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
1584 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1585 MLX5_SET(wq, wq, pd, priv->pdn);
1587 param->wq.buf_numa_node = 0;
1588 param->wq.db_numa_node = 0;
1589 param->wq.linear = 1;
1593 mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1594 struct mlx5e_cq_param *param)
1596 void *cqc = param->cqc;
1598 MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
1602 mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1603 struct mlx5e_cq_param *param)
1605 void *cqc = param->cqc;
1607 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
1608 MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
1609 MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
1611 mlx5e_build_common_cq_param(priv, param);
1615 mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1616 struct mlx5e_cq_param *param)
1618 void *cqc = param->cqc;
1620 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
1621 MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
1622 MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
1624 mlx5e_build_common_cq_param(priv, param);
1628 mlx5e_build_channel_param(struct mlx5e_priv *priv,
1629 struct mlx5e_channel_param *cparam)
1631 memset(cparam, 0, sizeof(*cparam));
1633 mlx5e_build_rq_param(priv, &cparam->rq);
1634 mlx5e_build_sq_param(priv, &cparam->sq);
1635 mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
1636 mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
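/*
 * mlx5e_open_channels() allocates the channel pointer array, builds
 * the shared channel parameters once and opens one channel per
 * configured queue pair, then waits for every RQ to be populated
 * with the minimum number of receive WQEs before returning.
 */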
1640 mlx5e_open_channels(struct mlx5e_priv *priv)
1642 struct mlx5e_channel_param cparam;
1648 priv->channel = malloc(priv->params.num_channels *
1649 sizeof(struct mlx5e_channel *), M_MLX5EN, M_WAITOK | M_ZERO);
1650 if (priv->channel == NULL)
1653 mlx5e_build_channel_param(priv, &cparam);
1654 for (i = 0; i < priv->params.num_channels; i++) {
1655 err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
1657 goto err_close_channels;
1660 for (j = 0; j < priv->params.num_channels; j++) {
1661 err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
1663 goto err_close_channels;
1669 for (i--; i >= 0; i--) {
1670 mlx5e_close_channel(&priv->channel[i]);
1671 mlx5e_close_channel_wait(&priv->channel[i]);
1674 /* remove "volatile" attribute from "channel" pointer */
1675 ptr = __DECONST(void *, priv->channel);
1676 priv->channel = NULL;
1678 free(ptr, M_MLX5EN);
1684 mlx5e_close_channels(struct mlx5e_priv *priv)
1689 if (priv->channel == NULL)
1692 for (i = 0; i < priv->params.num_channels; i++)
1693 mlx5e_close_channel(&priv->channel[i]);
1694 for (i = 0; i < priv->params.num_channels; i++)
1695 mlx5e_close_channel_wait(&priv->channel[i]);
1697 /* remove "volatile" attribute from "channel" pointer */
1698 ptr = __DECONST(void *, priv->channel);
1699 priv->channel = NULL;
1701 free(ptr, M_MLX5EN);
1705 mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
1707 struct mlx5_core_dev *mdev = priv->mdev;
1708 u32 in[MLX5_ST_SZ_DW(create_tis_in)];
1709 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1711 memset(in, 0, sizeof(in));
1713 MLX5_SET(tisc, tisc, prio, tc);
1714 MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
1716 return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
1720 mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
1722 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
1726 mlx5e_open_tises(struct mlx5e_priv *priv)
1728 int num_tc = priv->num_tc;
1732 for (tc = 0; tc < num_tc; tc++) {
1733 err = mlx5e_open_tis(priv, tc);
1735 goto err_close_tises;
1741 for (tc--; tc >= 0; tc--)
1742 mlx5e_close_tis(priv, tc);
1748 mlx5e_close_tises(struct mlx5e_priv *priv)
1750 int num_tc = priv->num_tc;
1753 for (tc = 0; tc < num_tc; tc++)
1754 mlx5e_close_tis(priv, tc);
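/*
 * mlx5e_open_rqt() creates the RQ indirection table used for RSS.
 * The table holds (1 << rx_hash_log_tbl_sz) entries and each entry
 * points at a channel RQ, either following the kernel RSS bucket
 * layout or a simple round-robin over the channels.
 */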
1758 mlx5e_open_rqt(struct mlx5e_priv *priv)
1760 struct mlx5_core_dev *mdev = priv->mdev;
1762 u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
1769 sz = 1 << priv->params.rx_hash_log_tbl_sz;
1771 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
1772 in = mlx5_vzalloc(inlen);
1775 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
1777 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
1778 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
1780 for (i = 0; i < sz; i++) {
1783 ix = rss_get_indirection_to_bucket(i);
1787 /* ensure we don't overflow */
1788 ix %= priv->params.num_channels;
1789 MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
1792 MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
1794 memset(out, 0, sizeof(out));
1795 err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
1797 priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
1805 mlx5e_close_rqt(struct mlx5e_priv *priv)
1807 u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
1808 u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
1810 memset(in, 0, sizeof(in));
1812 MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
1813 MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
1815 mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
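/*
 * mlx5e_build_tir_ctx() fills in one TIR context per traffic type:
 * optional hardware LRO parameters, direct dispatch to the first RQ
 * for the non-hashed type, or indirect dispatch through the RQT with
 * a Toeplitz hash whose selected fields depend on the traffic type
 * (TCP/UDP over IPv4/IPv6, IPsec AH/ESP and plain IP).
 */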
1820 mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
1822 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1825 MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
1827 #define ROUGH_MAX_L2_L3_HDR_SZ 256
1829 #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
1830 MLX5_HASH_FIELD_SEL_DST_IP)
1832 #define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
1833 MLX5_HASH_FIELD_SEL_DST_IP |\
1834 MLX5_HASH_FIELD_SEL_L4_SPORT |\
1835 MLX5_HASH_FIELD_SEL_L4_DPORT)
1837 #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
1838 MLX5_HASH_FIELD_SEL_DST_IP |\
1839 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
1841 if (priv->params.hw_lro_en) {
1842 MLX5_SET(tirc, tirc, lro_enable_mask,
1843 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
1844 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
1845 MLX5_SET(tirc, tirc, lro_max_msg_sz,
1846 (priv->params.lro_wqe_sz -
1847 ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
1848 /* TODO: add the option to choose timer value dynamically */
1849 MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
1850 MLX5_CAP_ETH(priv->mdev,
1851 lro_timer_supported_periods[2]));
1854 /* setup parameters for hashing TIR type, if any */
1857 MLX5_SET(tirc, tirc, disp_type,
1858 MLX5_TIRC_DISP_TYPE_DIRECT);
1859 MLX5_SET(tirc, tirc, inline_rqn,
1860 priv->channel[0]->rq.rqn);
1863 MLX5_SET(tirc, tirc, disp_type,
1864 MLX5_TIRC_DISP_TYPE_INDIRECT);
1865 MLX5_SET(tirc, tirc, indirect_table,
1867 MLX5_SET(tirc, tirc, rx_hash_fn,
1868 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
1869 hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
* The FreeBSD RSS implementation does not currently
* support symmetric Toeplitz hashes:
1875 MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
1876 rss_getkey((uint8_t *)hkey);
1878 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1879 hkey[0] = cpu_to_be32(0xD181C62C);
1880 hkey[1] = cpu_to_be32(0xF7F4DB5B);
1881 hkey[2] = cpu_to_be32(0x1983A2FC);
1882 hkey[3] = cpu_to_be32(0x943E1ADB);
1883 hkey[4] = cpu_to_be32(0xD9389E6B);
1884 hkey[5] = cpu_to_be32(0xD1039C2C);
1885 hkey[6] = cpu_to_be32(0xA74499AD);
1886 hkey[7] = cpu_to_be32(0x593D56D9);
1887 hkey[8] = cpu_to_be32(0xF3253C06);
1888 hkey[9] = cpu_to_be32(0x2ADC1FFC);
1894 case MLX5E_TT_IPV4_TCP:
1895 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1896 MLX5_L3_PROT_TYPE_IPV4);
1897 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1898 MLX5_L4_PROT_TYPE_TCP);
1900 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
1901 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1905 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1909 case MLX5E_TT_IPV6_TCP:
1910 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1911 MLX5_L3_PROT_TYPE_IPV6);
1912 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1913 MLX5_L4_PROT_TYPE_TCP);
1915 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
1916 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1920 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1924 case MLX5E_TT_IPV4_UDP:
1925 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1926 MLX5_L3_PROT_TYPE_IPV4);
1927 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1928 MLX5_L4_PROT_TYPE_UDP);
1930 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
1931 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1935 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1939 case MLX5E_TT_IPV6_UDP:
1940 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1941 MLX5_L3_PROT_TYPE_IPV6);
1942 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1943 MLX5_L4_PROT_TYPE_UDP);
1945 if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
1946 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1950 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1954 case MLX5E_TT_IPV4_IPSEC_AH:
1955 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1956 MLX5_L3_PROT_TYPE_IPV4);
1957 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1958 MLX5_HASH_IP_IPSEC_SPI);
1961 case MLX5E_TT_IPV6_IPSEC_AH:
1962 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1963 MLX5_L3_PROT_TYPE_IPV6);
1964 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1965 MLX5_HASH_IP_IPSEC_SPI);
1968 case MLX5E_TT_IPV4_IPSEC_ESP:
1969 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1970 MLX5_L3_PROT_TYPE_IPV4);
1971 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1972 MLX5_HASH_IP_IPSEC_SPI);
1975 case MLX5E_TT_IPV6_IPSEC_ESP:
1976 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1977 MLX5_L3_PROT_TYPE_IPV6);
1978 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1979 MLX5_HASH_IP_IPSEC_SPI);
1983 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1984 MLX5_L3_PROT_TYPE_IPV4);
1985 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
1990 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1991 MLX5_L3_PROT_TYPE_IPV6);
1992 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2002 mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
2004 struct mlx5_core_dev *mdev = priv->mdev;
2010 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2011 in = mlx5_vzalloc(inlen);
2014 tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
2016 mlx5e_build_tir_ctx(priv, tirc, tt);
2018 err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
2026 mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
2028 mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
2032 mlx5e_open_tirs(struct mlx5e_priv *priv)
2037 for (i = 0; i < MLX5E_NUM_TT; i++) {
2038 err = mlx5e_open_tir(priv, i);
2040 goto err_close_tirs;
2046 for (i--; i >= 0; i--)
2047 mlx5e_close_tir(priv, i);
2053 mlx5e_close_tirs(struct mlx5e_priv *priv)
2057 for (i = 0; i < MLX5E_NUM_TT; i++)
2058 mlx5e_close_tir(priv, i);
2062 * SW MTU does not include headers,
2063 * HW MTU includes all headers and checksums.
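 * For example, a SW MTU of 1500 maps to a larger HW MTU that also
 * accounts for the Ethernet header and trailer overhead; the exact
 * overhead is defined by the MLX5E_SW2HW_MTU() macro.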
2066 mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
2068 struct mlx5e_priv *priv = ifp->if_softc;
2069 struct mlx5_core_dev *mdev = priv->mdev;
2074 err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(sw_mtu));
2076 if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
2077 __func__, sw_mtu, err);
2080 err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
2082 ifp->if_mtu = MLX5E_HW2SW_MTU(hw_mtu);
2084 if (ifp->if_mtu != sw_mtu) {
2085 if_printf(ifp, "Port MTU %d is different than "
2086 "ifp mtu %d\n", sw_mtu, (int)ifp->if_mtu);
if_printf(ifp, "Querying port MTU after setting the new "
    "MTU value failed\n");
2091 ifp->if_mtu = sw_mtu;
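/*
 * mlx5e_open_locked() performs the full bring-up sequence: TISes,
 * the vport queue counter, the channels, the RQT, the TIRs, the flow
 * table and finally the VLAN rules; the error labels tear the pieces
 * down in reverse order.
 */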
2097 mlx5e_open_locked(struct ifnet *ifp)
2099 struct mlx5e_priv *priv = ifp->if_softc;
2102 /* check if already opened */
2103 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2107 if (rss_getnumbuckets() > priv->params.num_channels) {
2108 if_printf(ifp, "NOTE: There are more RSS buckets(%u) than "
2109 "channels(%u) available\n", rss_getnumbuckets(),
2110 priv->params.num_channels);
2113 err = mlx5e_open_tises(priv);
2115 if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
2119 err = mlx5_vport_alloc_q_counter(priv->mdev, &priv->counter_set_id);
2121 if_printf(priv->ifp,
2122 "%s: mlx5_vport_alloc_q_counter failed: %d\n",
2124 goto err_close_tises;
2126 err = mlx5e_open_channels(priv);
2128 if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
2130 goto err_dalloc_q_counter;
2132 err = mlx5e_open_rqt(priv);
2134 if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
2136 goto err_close_channels;
2138 err = mlx5e_open_tirs(priv);
2140 if_printf(ifp, "%s: mlx5e_open_tir failed, %d\n",
2142 goto err_close_rqls;
2144 err = mlx5e_open_flow_table(priv);
2146 if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
2148 goto err_close_tirs;
2150 err = mlx5e_add_all_vlan_rules(priv);
2152 if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
2154 goto err_close_flow_table;
2156 set_bit(MLX5E_STATE_OPENED, &priv->state);
2158 mlx5e_update_carrier(priv);
2159 mlx5e_set_rx_mode_core(priv);
2163 err_close_flow_table:
2164 mlx5e_close_flow_table(priv);
2167 mlx5e_close_tirs(priv);
2170 mlx5e_close_rqt(priv);
2173 mlx5e_close_channels(priv);
2175 err_dalloc_q_counter:
2176 mlx5_vport_dealloc_q_counter(priv->mdev, priv->counter_set_id);
2179 mlx5e_close_tises(priv);
2185 mlx5e_open(void *arg)
2187 struct mlx5e_priv *priv = arg;
2190 if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
2191 if_printf(priv->ifp,
2192 "%s: Setting port status to up failed\n",
2195 mlx5e_open_locked(priv->ifp);
2196 priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
2201 mlx5e_close_locked(struct ifnet *ifp)
2203 struct mlx5e_priv *priv = ifp->if_softc;
2205 /* check if already closed */
2206 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2209 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2211 mlx5e_set_rx_mode_core(priv);
2212 mlx5e_del_all_vlan_rules(priv);
2213 if_link_state_change(priv->ifp, LINK_STATE_DOWN);
2214 mlx5e_close_flow_table(priv);
2215 mlx5e_close_tirs(priv);
2216 mlx5e_close_rqt(priv);
2217 mlx5e_close_channels(priv);
2218 mlx5_vport_dealloc_q_counter(priv->mdev, priv->counter_set_id);
2219 mlx5e_close_tises(priv);
2224 #if (__FreeBSD_version >= 1100000)
2226 mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
2228 struct mlx5e_priv *priv = ifp->if_softc;
2231 /* PRIV_LOCK(priv); XXX not allowed */
2233 case IFCOUNTER_IPACKETS:
2234 retval = priv->stats.vport.rx_packets;
2236 case IFCOUNTER_IERRORS:
2237 retval = priv->stats.vport.rx_error_packets;
2239 case IFCOUNTER_IQDROPS:
2240 retval = priv->stats.vport.rx_out_of_buffer;
2242 case IFCOUNTER_OPACKETS:
2243 retval = priv->stats.vport.tx_packets;
2245 case IFCOUNTER_OERRORS:
2246 retval = priv->stats.vport.tx_error_packets;
2248 case IFCOUNTER_IBYTES:
2249 retval = priv->stats.vport.rx_bytes;
2251 case IFCOUNTER_OBYTES:
2252 retval = priv->stats.vport.tx_bytes;
2254 case IFCOUNTER_IMCASTS:
2255 retval = priv->stats.vport.rx_multicast_packets;
2257 case IFCOUNTER_OMCASTS:
2258 retval = priv->stats.vport.tx_multicast_packets;
2260 case IFCOUNTER_OQDROPS:
2261 retval = priv->stats.vport.tx_queue_dropped;
2264 retval = if_get_counter_default(ifp, cnt);
2267 /* PRIV_UNLOCK(priv); XXX not allowed */
2273 mlx5e_set_rx_mode(struct ifnet *ifp)
2275 struct mlx5e_priv *priv = ifp->if_softc;
2277 schedule_work(&priv->set_rx_mode_work);
2281 mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2283 struct mlx5e_priv *priv;
2285 struct ifi2creq i2c;
2292 priv = ifp->if_softc;
2294 /* check if detaching */
2295 if (priv == NULL || priv->gone != 0)
2300 ifr = (struct ifreq *)data;
2303 mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
2305 if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
2306 ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
2309 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2311 mlx5e_close_locked(ifp);
2314 mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
2317 mlx5e_open_locked(ifp);
2320 if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
2321 MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
2326 if ((ifp->if_flags & IFF_UP) &&
2327 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2328 mlx5e_set_rx_mode(ifp);
2332 if (ifp->if_flags & IFF_UP) {
2333 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2334 if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
2335 mlx5e_open_locked(ifp);
2336 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2337 mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
2340 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2341 mlx5_set_port_status(priv->mdev,
2343 if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
2344 mlx5e_close_locked(ifp);
2345 mlx5e_update_carrier(priv);
2346 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2353 mlx5e_set_rx_mode(ifp);
2358 ifr = (struct ifreq *)data;
2359 error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
2362 ifr = (struct ifreq *)data;
2364 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2366 if (mask & IFCAP_TXCSUM) {
2367 ifp->if_capenable ^= IFCAP_TXCSUM;
2368 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2370 if (IFCAP_TSO4 & ifp->if_capenable &&
2371 !(IFCAP_TXCSUM & ifp->if_capenable)) {
2372 ifp->if_capenable &= ~IFCAP_TSO4;
2373 ifp->if_hwassist &= ~CSUM_IP_TSO;
2375 "tso4 disabled due to -txcsum.\n");
2378 if (mask & IFCAP_TXCSUM_IPV6) {
2379 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2380 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2382 if (IFCAP_TSO6 & ifp->if_capenable &&
2383 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2384 ifp->if_capenable &= ~IFCAP_TSO6;
2385 ifp->if_hwassist &= ~CSUM_IP6_TSO;
2387 "tso6 disabled due to -txcsum6.\n");
2390 if (mask & IFCAP_RXCSUM)
2391 ifp->if_capenable ^= IFCAP_RXCSUM;
2392 if (mask & IFCAP_RXCSUM_IPV6)
2393 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2394 if (mask & IFCAP_TSO4) {
2395 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
2396 !(IFCAP_TXCSUM & ifp->if_capenable)) {
2397 if_printf(ifp, "enable txcsum first.\n");
2401 ifp->if_capenable ^= IFCAP_TSO4;
2402 ifp->if_hwassist ^= CSUM_IP_TSO;
2404 if (mask & IFCAP_TSO6) {
2405 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
2406 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
2407 if_printf(ifp, "enable txcsum6 first.\n");
2411 ifp->if_capenable ^= IFCAP_TSO6;
2412 ifp->if_hwassist ^= CSUM_IP6_TSO;
2414 if (mask & IFCAP_VLAN_HWFILTER) {
2415 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2416 mlx5e_disable_vlan_filter(priv);
2418 mlx5e_enable_vlan_filter(priv);
2420 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2422 if (mask & IFCAP_VLAN_HWTAGGING)
2423 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2424 if (mask & IFCAP_WOL_MAGIC)
2425 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2427 VLAN_CAPABILITIES(ifp);
/* turning off LRO also means turning off HW LRO, if it is enabled */
2429 if (mask & IFCAP_LRO) {
2430 int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2431 bool need_restart = false;
2433 ifp->if_capenable ^= IFCAP_LRO;
2434 if (!(ifp->if_capenable & IFCAP_LRO)) {
2435 if (priv->params.hw_lro_en) {
2436 priv->params.hw_lro_en = false;
2437 need_restart = true;
2438 /* Not sure this is the correct way */
2439 priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
2442 if (was_opened && need_restart) {
2443 mlx5e_close_locked(ifp);
2444 mlx5e_open_locked(ifp);
2452 ifr = (struct ifreq *)data;
2455 * Copy from the user-space address ifr_data to the
2456 * kernel-space address i2c
2458 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
2462 if (i2c.len > sizeof(i2c.data)) {
2468 /* Get module_num which is required for the query_eeprom */
2469 error = mlx5_query_module_num(priv->mdev, &module_num);
2471 if_printf(ifp, "Query module num failed, eeprom "
2472 "reading is not supported\n");
* Note that we ignore i2c.addr here. The driver hardcodes
* the address to 0x50, while the standard expects it to be 0xA0.
2480 error = mlx5_query_eeprom(priv->mdev,
2481 MLX5E_I2C_ADDR_LOW, MLX5E_EEPROM_LOW_PAGE,
2482 (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
2483 (uint32_t *)i2c.data, &size_read);
2485 if_printf(ifp, "Query eeprom failed, eeprom "
2486 "reading is not supported\n");
2490 if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
2491 error = mlx5_query_eeprom(priv->mdev,
2492 MLX5E_I2C_ADDR_LOW, MLX5E_EEPROM_LOW_PAGE,
2493 (uint32_t)(i2c.offset + size_read),
2494 (uint32_t)(i2c.len - size_read), module_num,
2495 (uint32_t *)(i2c.data + size_read), &size_read);
2498 if_printf(ifp, "Query eeprom failed, eeprom "
2499 "reading is not supported\n");
2503 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
2509 error = ether_ioctl(ifp, command, data);
2516 mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
* TODO: uncomment once FW really sets all these bits if
2520 * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
2521 * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
2522 * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return
/* TODO: add more must-have features */
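/*
 * mlx5e_build_ifp_priv() derives the default tunables from the
 * device capabilities and the number of completion vectors: queue
 * sizes, CQ moderation, the RSS table size (at least
 * order_base_2(num_comp_vectors)), the number of channels, and the
 * deferred work items used for statistics, carrier and RX mode
 * updates.
 */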
2532 mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
2533 struct mlx5e_priv *priv,
2534 int num_comp_vectors)
2537 * TODO: Consider link speed for setting "log_sq_size",
2538 * "log_rq_size" and "cq_moderation_xxx":
2540 priv->params.log_sq_size =
2541 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
2542 priv->params.log_rq_size =
2543 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
2544 priv->params.rx_cq_moderation_usec =
2545 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
2546 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
2547 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
2548 priv->params.rx_cq_moderation_mode =
2549 MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
2550 priv->params.rx_cq_moderation_pkts =
2551 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
2552 priv->params.tx_cq_moderation_usec =
2553 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
2554 priv->params.tx_cq_moderation_pkts =
2555 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
2556 priv->params.min_rx_wqes =
2557 MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
2558 priv->params.rx_hash_log_tbl_sz =
2559 (order_base_2(num_comp_vectors) >
2560 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
2561 order_base_2(num_comp_vectors) :
2562 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
2563 priv->params.num_tc = 1;
2564 priv->params.default_vlan_prio = 0;
2565 priv->counter_set_id = -1;
* HW LRO currently defaults to off. Once that changes, we will
* consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
2571 priv->params.hw_lro_en = false;
2572 priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
2575 priv->params.num_channels = num_comp_vectors;
2576 priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
2577 priv->queue_mapping_channel_mask =
2578 roundup_pow_of_two(num_comp_vectors) - 1;
2579 priv->num_tc = priv->params.num_tc;
2580 priv->default_vlan_prio = priv->params.default_vlan_prio;
2582 INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
2583 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
2584 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
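/*
 * mlx5e_create_mkey() registers a single physical-address memory key
 * (MLX5_ACCESS_MODE_PA with MLX5_MKEY_LEN64) covering the whole
 * address space, so RX and TX buffers can be referenced by their bus
 * addresses without per-buffer registration.
 */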
2588 mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
2589 struct mlx5_core_mr *mr)
2591 struct ifnet *ifp = priv->ifp;
2592 struct mlx5_core_dev *mdev = priv->mdev;
2593 struct mlx5_create_mkey_mbox_in *in;
2596 in = mlx5_vzalloc(sizeof(*in));
2598 if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
2601 in->seg.flags = MLX5_PERM_LOCAL_WRITE |
2602 MLX5_PERM_LOCAL_READ |
2603 MLX5_ACCESS_MODE_PA;
2604 in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
2605 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
2607 err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
2610 if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
2618 static const char *mlx5e_vport_stats_desc[] = {
2619 MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
2622 static const char *mlx5e_pport_stats_desc[] = {
2623 MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
2627 mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
2629 mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
2630 sx_init(&priv->state_lock, "mlx5state");
2631 callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
2635 mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
2637 mtx_destroy(&priv->async_events_mtx);
2638 sx_destroy(&priv->state_lock);
2642 sysctl_firmware(SYSCTL_HANDLER_ARGS)
2645 * The firmware version string format is "%d.%d.%d".
2646 * fw_rev_{maj,min,sub} each return a u16, and 65535 needs
2647 * at most 5 characters to print.
2648 * Add two "." separators and a terminating NUL, so 18
2649 * (5*3 + 3) characters are always enough.
2652 struct mlx5e_priv *priv = arg1;
2655 snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
2656 fw_rev_sub(priv->mdev));
2657 error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
2662 mlx5e_add_hw_stats(struct mlx5e_priv *priv)
2664 SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
2665 OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
2666 sysctl_firmware, "A", "HCA firmware version");
2668 SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
2669 OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
2674 mlx5e_create_ifp(struct mlx5_core_dev *mdev)
2676 static volatile int mlx5_en_unit;
2678 struct mlx5e_priv *priv;
2679 u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
2680 struct sysctl_oid_list *child;
2681 int ncv = mdev->priv.eq_table.num_comp_vectors;
2687 if (mlx5e_check_required_hca_cap(mdev)) {
2688 mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
2691 priv = malloc(sizeof(*priv), M_MLX5EN, M_WAITOK | M_ZERO);
2693 mlx5_core_err(mdev, "malloc() failed\n");
2696 mlx5e_priv_mtx_init(priv);
2698 ifp = priv->ifp = if_alloc(IFT_ETHER);
2700 mlx5_core_err(mdev, "if_alloc() failed\n");
2703 ifp->if_softc = priv;
2704 if_initname(ifp, "mce", atomic_fetchadd_int(&mlx5_en_unit, 1));
2705 ifp->if_mtu = ETHERMTU;
2706 ifp->if_init = mlx5e_open;
2707 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2708 ifp->if_ioctl = mlx5e_ioctl;
2709 ifp->if_transmit = mlx5e_xmit;
2710 ifp->if_qflush = if_qflush;
2711 #if (__FreeBSD_version >= 1100000)
2712 ifp->if_get_counter = mlx5e_get_counter;
2714 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2716 * Set driver features
2718 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
2719 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
2720 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
2721 ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
2722 ifp->if_capabilities |= IFCAP_LRO;
2723 ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
2725 /* set TSO limits so that we don't have to drop TX packets */
2726 ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2727 ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
2728 ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
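/*
 * The payload limit leaves room for the Ethernet and VLAN headers, and
 * one fragment is reserved for the header mbuf, so TSO packets built
 * by the stack never exceed what the send queue can post.
 */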
2730 ifp->if_capenable = ifp->if_capabilities;
2731 ifp->if_hwassist = 0;
2732 if (ifp->if_capenable & IFCAP_TSO)
2733 ifp->if_hwassist |= CSUM_TSO;
2734 if (ifp->if_capenable & IFCAP_TXCSUM)
2735 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2736 if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
2737 ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2739 /* ifnet sysctl tree */
2740 sysctl_ctx_init(&priv->sysctl_ctx);
2741 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
2742 OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
2743 if (priv->sysctl_ifnet == NULL) {
2744 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
2745 goto err_free_sysctl;
2747 snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
2748 priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
2749 OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
2750 if (priv->sysctl_ifnet == NULL) {
2751 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
2752 goto err_free_sysctl;
2755 /* HW sysctl tree */
2756 child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
2757 priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
2758 OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
2759 if (priv->sysctl_hw == NULL) {
2760 mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
2761 goto err_free_sysctl;
2763 mlx5e_build_ifp_priv(mdev, priv, ncv);
2764 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
2766 if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
2768 goto err_free_sysctl;
2770 err = mlx5_core_alloc_pd(mdev, &priv->pdn);
2772 if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
2774 goto err_unmap_free_uar;
2776 err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
2778 if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
2780 goto err_dealloc_pd;
2782 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
2784 if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
2786 goto err_dealloc_transport_domain;
2788 mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
2790 /* set default MTU */
2791 mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
2794 device_set_desc(mdev->pdev->dev.bsddev, mlx5e_version);
2796 /* Set default media status */
2797 priv->media_status_last = IFM_AVALID;
2798 priv->media_active_last = IFM_ETHER | IFM_AUTO;
2800 /* Pause frames are enabled by default */
2801 priv->params_ethtool.tx_pauseframe_control = 1;
2802 priv->params_ethtool.rx_pauseframe_control = 1;
2804 err = mlx5_query_port_proto_cap(mdev, ð_proto_cap, MLX5_PTYS_EN);
2807 if_printf(ifp, "%s: Query port media capability failed, %d\n",
2811 /* Set up supported media types */
2812 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
2813 mlx5e_media_change, mlx5e_media_status);
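/*
 * Advertise every link mode from mlx5e_mode_table that the port
 * reports in eth_proto_cap; entries with a zero baudrate are unknown
 * to the driver and are skipped.
 */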
2815 for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
2816 if (mlx5e_mode_table[i].baudrate == 0)
2818 if (MLX5E_PROT_MASK(i) & eth_proto_cap)
2819 ifmedia_add(&priv->media,
2820 IFM_ETHER | mlx5e_mode_table[i].subtype |
2824 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2825 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
2826 ether_ifattach(ifp, dev_addr);
2828 /* Register for VLAN events */
2829 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
2830 mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
2831 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
2832 mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
2834 /* Link is down by default */
2835 if_link_state_change(ifp, LINK_STATE_DOWN);
2837 mlx5e_enable_async_events(priv);
2839 mlx5e_add_hw_stats(priv);
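/*
 * Export the NIC vport counters as "vstats" and the physical port
 * counters as "pstats" under the per-interface sysctl tree.
 */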
2841 mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
2842 "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
2843 priv->stats.vport.arg);
2845 mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
2846 "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
2847 priv->stats.pport.arg);
2849 mlx5e_create_ethtool(priv);
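/*
 * Run one statistics update now, under the async events mutex, so the
 * counters exported above hold valid values as soon as the interface
 * is attached.
 */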
2851 mtx_lock(&priv->async_events_mtx);
2852 mlx5e_update_stats(priv);
2853 mtx_unlock(&priv->async_events_mtx);
2857 err_dealloc_transport_domain:
2858 mlx5_dealloc_transport_domain(mdev, priv->tdn);
2861 mlx5_core_dealloc_pd(mdev, priv->pdn);
2864 mlx5_unmap_free_uar(mdev, &priv->cq_uar);
2867 sysctl_ctx_free(&priv->sysctl_ctx);
2872 mlx5e_priv_mtx_destroy(priv);
2873 free(priv, M_MLX5EN);
2878 mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
2880 struct mlx5e_priv *priv = vpriv;
2881 struct ifnet *ifp = priv->ifp;
2883 /* don't allow more IOCTLs */
2886 /* XXX wait a bit to allow IOCTL handlers to complete */
2889 /* stop watchdog timer */
2890 callout_drain(&priv->watchdog);
2892 if (priv->vlan_attach != NULL)
2893 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
2894 if (priv->vlan_detach != NULL)
2895 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
2897 /* make sure device gets closed */
2899 mlx5e_close_locked(ifp);
2902 /* unregister device */
2903 ifmedia_removeall(&priv->media);
2904 ether_ifdetach(ifp);
2907 /* destroy all remaining sysctl nodes */
2908 if (priv->sysctl_debug)
2909 sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
2910 sysctl_ctx_free(&priv->stats.vport.ctx);
2911 sysctl_ctx_free(&priv->stats.pport.ctx);
2912 sysctl_ctx_free(&priv->sysctl_ctx);
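/*
 * Release hardware resources in the reverse order of creation:
 * mkey, transport domain, protection domain and finally the CQ UAR.
 */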
2914 mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
2915 mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
2916 mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
2917 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
2918 mlx5e_disable_async_events(priv);
2919 flush_scheduled_work();
2920 mlx5e_priv_mtx_destroy(priv);
2921 free(priv, M_MLX5EN);
2925 mlx5e_get_ifp(void *vpriv)
2927 struct mlx5e_priv *priv = vpriv;
2932 static struct mlx5_interface mlx5e_interface = {
2933 .add = mlx5e_create_ifp,
2934 .remove = mlx5e_destroy_ifp,
2935 .event = mlx5e_async_event,
2936 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
2937 .get_dev = mlx5e_get_ifp,
2943 mlx5_register_interface(&mlx5e_interface);
2949 mlx5_unregister_interface(&mlx5e_interface);
2952 module_init_order(mlx5e_init, SI_ORDER_THIRD);
2953 module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);
2955 #if (__FreeBSD_version >= 1100000)
2956 MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
2958 MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
2959 MODULE_VERSION(mlx5en, 1);