 * Copyright (c) 2015-2019 Mellanox Technologies. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF

#include "opt_kern_tls.h"

#include <machine/atomic.h>

mlx5e_do_send_cqe_inline(struct mlx5e_sq *sq)
	/* interleave the CQEs */
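	/*
	 * Completion events are moderated: one CQE is requested roughly
	 * once per cev_factor transmitted WQEs, e.g. with cev_factor == 8
	 * only every 8th send request sets the completion flag.
	 */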
	if (sq->cev_counter >= sq->cev_factor) {

mlx5e_do_send_cqe(struct mlx5e_sq *sq)
	return (mlx5e_do_send_cqe_inline(sq));

mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	if (mlx5e_do_send_cqe_inline(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	sq->mbuf[pi].mbuf = NULL;
	sq->mbuf[pi].num_bytes = 0;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
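	/*
	 * Example (illustrative, assuming 64-byte WQE building blocks and
	 * 16-byte data segments, i.e. MLX5_SEND_WQEBB_NUM_DS == 4): a NOP
	 * with ds_cnt == 8 occupies DIV_ROUND_UP(8, 4) == 2 WQEBBs.
	 */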
	sq->pc += sq->mbuf[pi].num_wqebbs;

#if (__FreeBSD_version >= 1100000)
static uint32_t mlx5e_hash_value;

mlx5e_hash_init(void *arg)
	mlx5e_hash_value = m_ether_tcpip_hash_init();

/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
static struct mlx5e_sq *
mlx5e_select_queue_by_send_tag(struct ifnet *ifp, struct mbuf *mb)
	struct m_snd_tag *mb_tag;

	mb_tag = mb->m_pkthdr.snd_tag;

	/* get pointer to sendqueue */
	switch (mb_tag->type) {
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		sq = container_of(mb_tag,
		    struct mlx5e_rl_channel, tag)->sq;
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		mb_tag = container_of(mb_tag, struct mlx5e_tls_tag, tag)->rl_tag;
	case IF_SND_TAG_TYPE_UNLIMITED:
		sq = &container_of(mb_tag,
		    struct mlx5e_channel, tag)->sq[0];
		KASSERT((mb_tag->refcount > 0),
		    ("mlx5e_select_queue: Channel refs are zero for unlimited tag"));
	case IF_SND_TAG_TYPE_TLS:
		mb_tag = container_of(mb_tag, struct mlx5e_tls_tag, tag)->rl_tag;

	if (sq != NULL && READ_ONCE(sq->running) != 0)

static struct mlx5e_sq *
mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
	struct mlx5e_priv *priv = ifp->if_softc;

	/* obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		tc = (mb->m_pkthdr.ether_vtag >> 13);
		if (tc >= priv->num_tc)
			tc = priv->default_vlan_prio;
		tc = priv->default_vlan_prio;

	ch = priv->params.num_channels;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
		if (rss_hash2bucket(mb->m_pkthdr.flowid,
		    M_HASHTYPE_GET(mb), &temp) == 0)
		ch = (mb->m_pkthdr.flowid % 128) % ch;
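		/*
		 * Example (illustrative): with flowid == 1000 and 8
		 * channels, (1000 % 128) % 8 == 104 % 8 == 0, so the
		 * mbuf maps to channel 0.
		 */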
#if (__FreeBSD_version >= 1100000)
		ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
		    MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
		 * m_ether_tcpip_hash() is not present in stable, so just
		 * send unhashed mbufs to queue 0.
	/* check if send queue is running */
	sq = &priv->channel[ch].sq[tc];
	if (likely(READ_ONCE(sq->running) != 0))

mlx5e_get_l2_header_size(struct mlx5e_sq *sq, struct mbuf *mb)
	struct ether_vlan_header *eh;

	eh = mtod(mb, struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN)) {
	} else if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
		eth_type = ntohs(eh->evl_proto);
		min_inline = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eth_type = ntohs(eh->evl_encap_proto);
		min_inline = ETHER_HDR_LEN;

		/*
		 * Make sure the TOS(IPv4) or traffic class(IPv6)
		 * field gets inlined. Else the SQ may stall.
		 */

	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	if (mb->m_pkthdr.len < min_inline)

	return (MIN(mb->m_pkthdr.len, sq->max_inline));
 * This function parses IPv4 and IPv6 packets looking for TCP and UDP
 * headers.
 *
 * Upon return, the pointer at which the "ppth" argument points is set
 * to the location of the TCP header. NULL is used if no TCP header is
 * found.
 *
 * The return value indicates the number of bytes from the beginning
 * of the packet until the first byte after the TCP or UDP header. If
 * this function returns zero, the parsing failed.
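 *
 * For example, a plain TCP/IPv4 frame typically yields 14 (Ethernet) +
 * 20 (IPv4) + 20 (TCP) = 54 bytes, or 58 bytes when an 802.1Q VLAN tag
 * is present.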
mlx5e_get_full_header_size(const struct mbuf *mb, const struct tcphdr **ppth)
	const struct ether_vlan_header *eh;
	const struct tcphdr *th;
	int ip_hlen, tcp_hlen;
	const struct ip6_hdr *ip6;

	eh = mtod(mb, const struct ether_vlan_header *);
	if (unlikely(mb->m_len < ETHER_HDR_LEN))
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		if (unlikely(mb->m_len < (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)))
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;

		ip = (const struct ip *)(mb->m_data + eth_hdr_len);
		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip)))
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen;
			ip_hlen = ip->ip_hl << 2;
			eth_hdr_len += ip_hlen + 8;
		ip6 = (const struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (unlikely(mb->m_len < eth_hdr_len + sizeof(*ip6)))
		switch (ip6->ip6_nxt) {
			eth_hdr_len += sizeof(*ip6);
			eth_hdr_len += sizeof(*ip6) + 8;

	if (unlikely(mb->m_len < eth_hdr_len + sizeof(*th))) {
		const struct mbuf *m_th = mb->m_next;
		if (unlikely(mb->m_len != eth_hdr_len ||
		    m_th == NULL || m_th->m_len < sizeof(*th)))
		th = (const struct tcphdr *)(m_th->m_data);
		th = (const struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;

	/*
	 * m_copydata() will be used on the remaining header which
	 * does not need to reside within the first m_len bytes of
	if (unlikely(mb->m_pkthdr.len < eth_hdr_len))

	return (eth_hdr_len);
struct mlx5_wqe_dump_seg {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
} __aligned(MLX5_SEND_WQE_BB);

CTASSERT(DIV_ROUND_UP(2, MLX5_SEND_WQEBB_NUM_DS) == 1);

mlx5e_sq_dump_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **mbp)
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5_wqe_dump_seg *wqe;
	struct mlx5_wqe_dump_seg *wqe_last;
	const u32 ds_cnt = 2;
	const u8 opcode = MLX5_OPCODE_DUMP;

	/* get pointer to mbuf */

	/* get producer index */
	pi = sq->pc & sq->wq.sz_m1;

	sq->mbuf[pi].num_bytes = mb->m_pkthdr.len;
	sq->mbuf[pi].num_wqebbs = 0;

	/* check number of segments in mbuf */
	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		/* update statistics */
		sq->stats.defragged++;
		/* too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);

		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);

	/* make sure all mbuf data, if any, is visible to the bus */
	bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
	    BUS_DMASYNC_PREWRITE);

	/* compute number of real DUMP segments */
	msb = sq->priv->params_ethtool.hw_mtu_msb;
	for (x = xsegs = 0; x != nsegs; x++)
		xsegs += howmany((u32)segs[x].ds_len, msb);
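	/*
	 * Example (illustrative): with hw_mtu_msb == 4096 bytes, a single
	 * 9000-byte DMA segment contributes howmany(9000, 4096) == 3 DUMP
	 * work requests.
	 */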
	/* check if there are no segments */
	if (unlikely(xsegs == 0)) {
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		*mbp = NULL; /* safety clear */

	/* return ENOBUFS if the queue is full */
	if (unlikely(!mlx5e_sq_has_room_for(sq, xsegs))) {
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);
		*mbp = NULL; /* safety clear */

	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, sq->wq.sz_m1);

	for (x = 0; x != nsegs; x++) {
		for (off = 0; off < segs[x].ds_len; off += msb) {
			u32 len = segs[x].ds_len - off;

			if (likely(len > msb))

			memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));

			/* fill control segment */
			wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
			wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
			wqe->ctrl.imm = cpu_to_be32(parg->tisn << 8);

			/* fill data segment */
			wqe->data.addr = cpu_to_be64((uint64_t)segs[x].ds_addr + off);
			wqe->data.lkey = sq->mkey_be;
			wqe->data.byte_count = cpu_to_be32(len);

			/* advance to next building block */
			if (unlikely(wqe == wqe_last))
				wqe = mlx5_wq_cyc_get_wqe(&sq->wq, 0);

			sq->mbuf[pi].num_wqebbs++;

	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
	wqe_last = mlx5_wq_cyc_get_wqe(&sq->wq, (sq->pc - 1) & sq->wq.sz_m1);

	/* put in place data fence */
	wqe->ctrl.fm_ce_se |= MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* check if we should generate a completion event */
	if (mlx5e_do_send_cqe_inline(sq))
		wqe_last->ctrl.fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;

	/* copy data for doorbell */
	memcpy(sq->doorbell.d32, wqe_last, sizeof(sq->doorbell.d32));

	/* store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].p_refcount = parg->pref;
	atomic_add_int(parg->pref, 1);

	/* count all traffic going out */
	sq->stats.bytes += sq->mbuf[pi].num_bytes;

	*mbp = NULL; /* safety clear */

mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
	bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
	struct mlx5e_xmit_args args = {};
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe *wqe;

	/* Return ENOBUFS if the queue is full */
	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {

	/* Align SQ edge with NOPs to avoid WQE wrap around */
	pi = ((~sq->pc) & sq->wq.sz_m1);
	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
		/* Send one multi NOP message instead of many */
		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
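		/*
		 * Example (illustrative, assuming 4 data segments per
		 * WQEBB): if only pi + 1 == 4 WQEBBs remain before the end
		 * of the ring, a single NOP carrying 4 * 4 == 16 data
		 * segments pads exactly those 4 WQEBBs, so the next WQE
		 * starts again at the beginning of the ring.
		 */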
		pi = ((~sq->pc) & sq->wq.sz_m1);
		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {

	/* Special handling for TLS packets, if any */
	switch (mlx5e_sq_tls_xmit(sq, &args, mbp)) {
	case MLX5E_TLS_FAILURE:
	case MLX5E_TLS_DEFERRED:
	case MLX5E_TLS_CONTINUE:

	/* Setup local variables */
	pi = sq->pc & sq->wq.sz_m1;
	wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	/* get pointer to mbuf */

	/* Send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
	if (wqe->eth.cs_flags == 0) {
		sq->stats.csum_offload_none++;

	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 mss = mb->m_pkthdr.tso_segsz;

		wqe->eth.mss = cpu_to_be16(mss);
		opcode = MLX5_OPCODE_LSO;

		args.ihs = mlx5e_get_full_header_size(mb, NULL);
		if (unlikely(args.ihs == 0)) {

		payload_len = mb->m_pkthdr.len - args.ihs;
		if (payload_len == 0)

		num_pkts = DIV_ROUND_UP(payload_len, mss);
		sq->mbuf[pi].num_bytes = payload_len + (num_pkts * args.ihs);
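		/*
		 * Accounting example (illustrative): a 65536-byte TSO
		 * payload with mss == 1448 and a 66-byte inlined header
		 * gives num_pkts == DIV_ROUND_UP(65536, 1448) == 46, so
		 * num_bytes counts 65536 + 46 * 66 == 68572 wire bytes.
		 */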
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += payload_len;

		opcode = MLX5_OPCODE_SEND;

		switch (sq->min_inline_mode) {
		case MLX5_INLINE_MODE_IP:
		case MLX5_INLINE_MODE_TCP_UDP:
			args.ihs = mlx5e_get_full_header_size(mb, NULL);
			if (unlikely(args.ihs == 0))
				args.ihs = mlx5e_get_l2_header_size(sq, mb);
		case MLX5_INLINE_MODE_L2:
			args.ihs = mlx5e_get_l2_header_size(sq, mb);
		case MLX5_INLINE_MODE_NONE:
			if ((mb->m_flags & M_VLANTAG) != 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_VLAN) != 0) {
				/* inlining VLAN data is not required */
				wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
				wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
			} else if ((mb->m_flags & M_VLANTAG) == 0 &&
			    (sq->min_insert_caps & MLX5E_INSERT_NON_VLAN) != 0) {
				/* inlining non-VLAN data is not required */
			/* we are forced to inline the L2 header, if any */
			args.ihs = mlx5e_get_l2_header_size(sq, mb);

	sq->mbuf[pi].num_bytes = max_t (unsigned int,
	    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);

	if (likely(args.ihs == 0)) {
		/* nothing to inline */
	} else if ((mb->m_flags & M_VLANTAG) != 0) {
		struct ether_vlan_header *eh = (struct ether_vlan_header *)
		    wqe->eth.inline_hdr_start;

		if (unlikely(args.ihs > (sq->max_inline - ETHER_VLAN_ENCAP_LEN))) {
			if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
			args.ihs = (sq->max_inline - ETHER_VLAN_ENCAP_LEN);
		} else if (unlikely(args.ihs < ETHER_HDR_LEN)) {

		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
		m_adj(mb, ETHER_HDR_LEN);
		/* Insert a 4-byte VLAN tag into the data stream */
		eh->evl_proto = eh->evl_encap_proto;
		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);

		/* Copy rest of header data, if any */
		m_copydata(mb, 0, args.ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
		m_adj(mb, args.ihs - ETHER_HDR_LEN);

		/* Extend header by 4 bytes */
		args.ihs += ETHER_VLAN_ENCAP_LEN;
		wqe->eth.inline_hdr_sz = cpu_to_be16(args.ihs);

		/* check if inline header size is too big */
		if (unlikely(args.ihs > sq->max_inline)) {
			if (unlikely(mb->m_pkthdr.csum_flags & CSUM_TSO)) {
			args.ihs = sq->max_inline;

		m_copydata(mb, 0, args.ihs, wqe->eth.inline_hdr_start);
		wqe->eth.inline_hdr_sz = cpu_to_be16(args.ihs);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (args.ihs > sizeof(wqe->eth.inline_hdr_start)) {
		ds_cnt += DIV_ROUND_UP(args.ihs - sizeof(wqe->eth.inline_hdr_start),
		    MLX5_SEND_WQE_DS);
	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;
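	/*
	 * Sizing note (illustrative, assuming 16-byte data segments and a
	 * 2-byte inline_hdr_start field): an 18-byte inlined header keeps
	 * 2 bytes in the Ethernet segment and needs one extra data segment
	 * for the remaining 16 bytes, so ds_cnt grows by one.
	 */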
	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		/* Update statistics */
		sq->stats.defragged++;
		/* Too many mbuf fragments */
		mb = m_defrag(*mbp, M_NOWAIT);

		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);

	/* Make sure all mbuf data, if any, is visible to the bus */
		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map,
		    BUS_DMASYNC_PREWRITE);

		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(sq->dma_tag, sq->mbuf[pi].dma_map);

	for (x = 0; x != nsegs; x++) {
		if (segs[x].ds_len == 0)
		dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);

	ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	wqe->ctrl.imm = cpu_to_be32(args.tisn << 8);

	if (mlx5e_do_send_cqe_inline(sq))
		wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		wqe->ctrl.fm_ce_se = 0;

	/* Copy data for doorbell */
	memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));

	/* Store pointer to mbuf */
	sq->mbuf[pi].mbuf = mb;
	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->mbuf[pi].p_refcount = args.pref;
	if (unlikely(args.pref != NULL))
		atomic_add_int(args.pref, 1);
	sq->pc += sq->mbuf[pi].num_wqebbs;

	/* Count all traffic going out */
	sq->stats.bytes += sq->mbuf[pi].num_bytes;

	*mbp = NULL; /* safety clear */

mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
	/*
	 * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
		struct mlx5_cqe64 *cqe;

		cqe = mlx5e_get_cqe(&sq->cq);

		mlx5_cqwq_pop(&sq->cq.wq);

		/* update budget according to the event factor */
		budget -= sq->cev_factor;
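		/*
		 * Because completions are moderated, one CQE accounts for
		 * cev_factor WQEs; e.g. with cev_factor == 8 each processed
		 * CQE consumes 8 units of budget and completes 8 send
		 * queue entries in the loop below.
		 */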
		for (x = 0; x != sq->cev_factor; x++) {
			ci = sqcc & sq->wq.sz_m1;
			mb = sq->mbuf[ci].mbuf;
			sq->mbuf[ci].mbuf = NULL;

			if (unlikely(sq->mbuf[ci].p_refcount != NULL)) {
				atomic_add_int(sq->mbuf[ci].p_refcount, -1);
				sq->mbuf[ci].p_refcount = NULL;

			if (sq->mbuf[ci].num_bytes == 0) {

			bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);

			/* Free transmitted mbuf */

			sqcc += sq->mbuf[ci].num_wqebbs;

	mlx5_cqwq_update_db_record(&sq->cq.wq);

	/* Ensure cq space is freed before enabling more cqes */
	atomic_thread_fence_rel();

mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)

	if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    READ_ONCE(sq->running) == 0)) {

	if (mlx5e_sq_xmit(sq, &mb) != 0) {
		/* NOTE: m_freem() is NULL safe */

	/* Check if we need to write the doorbell */
	if (likely(sq->doorbell.d64 != 0)) {
		mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
		sq->doorbell.d64 = 0;
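		/*
		 * Note (illustrative): sq->doorbell.d64 is only non-zero
		 * when a WQE was actually queued above, so the doorbell
		 * MMIO write is skipped entirely on the drop/error paths.
		 */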
	/*
	 * Check if we need to start the event timer which flushes the
	 * transmit ring on timeout:
	if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
	    sq->cev_factor != 1)) {
		/* start the timer */
		mlx5e_sq_cev_timeout(sq);

		/* don't send NOPs yet */
		sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;

mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)

	if (mb->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		MPASS(mb->m_pkthdr.snd_tag->ifp == ifp);
		sq = mlx5e_select_queue_by_send_tag(ifp, mb);
		if (unlikely(sq == NULL)) {

		sq = mlx5e_select_queue(ifp, mb);
		if (unlikely(sq == NULL)) {

			/* Invalid send queue */

	ret = mlx5e_xmit_locked(ifp, sq, mb);
	mtx_unlock(&sq->lock);

mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)
	struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);

	mtx_lock(&sq->comp_lock);
	mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
	mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	mtx_unlock(&sq->comp_lock);