/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/vmalloc.h>

#include <net/ethernet.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
enum {
        MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
};

static int inline_thold = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
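
/*
 * Mbufs no longer than inline_thold bytes (and not marked for TSO) are
 * copied straight into the send WQE instead of being DMA mapped; see
 * is_inline() and build_inline_wqe() below.  The 104-byte ceiling is one
 * 128-byte TXBB minus the 16-byte ctrl segment and two 4-byte inline
 * segment headers (128 - 16 - 4 - 4).
 */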
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_tx_ring *ring, u32 size,
        struct mlx4_en_dev *mdev = priv->mdev;

        ring->size_mask = size - 1;
        ring->stride = stride;

        inline_thold = min(inline_thold, MAX_INLINE);

        mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
        mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

        /* Allocate the buf ring */
        ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
            M_WAITOK, &ring->tx_lock.m);
        if (ring->br == NULL) {
                en_err(priv, "Failed allocating tx buf ring\n");

        tmp = size * sizeof(struct mlx4_en_tx_info);
        ring->tx_info = kmalloc(tmp, GFP_KERNEL);
                en_err(priv, "Failed allocating tx_info ring\n");

        en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",

        ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
        if (!ring->bounce_buf) {
                en_err(priv, "Failed allocating bounce buffer\n");

        ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

        err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
                en_err(priv, "Failed allocating hwq resources\n");

        err = mlx4_en_map_buffer(&ring->wqres.buf);
                en_err(priv, "Failed to map TX buffer\n");

        ring->buf = ring->wqres.buf.direct.buf;

        en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
            "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
            ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

        err = mlx4_qp_reserve_range(mdev->dev, 1, 256, &ring->qpn);
                en_err(priv, "Failed reserving qp for tx ring.\n");

        err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
                en_err(priv, "Failed allocating qp %d\n", ring->qpn);

        ring->qp.event = mlx4_en_sqp_event;

        err = mlx4_bf_alloc(mdev->dev, &ring->bf);
                ring->bf.uar = &mdev->priv_uar;
                ring->bf.uar->map = mdev->uar_map;
                ring->bf_enabled = false;
                ring->bf_enabled = true;

        mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
        mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        kfree(ring->bounce_buf);
        ring->bounce_buf = NULL;
        buf_ring_free(ring->br, M_DEVBUF);
        kfree(ring->tx_info);
        ring->tx_info = NULL;
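
/*
 * Teardown counterpart of mlx4_en_create_tx_ring(): releases the QP and its
 * reserved range, unmaps and frees the HW queue, and frees the bounce
 * buffer, tx_info array, buf ring and locks.
 */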
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring)
        struct mlx4_en_dev *mdev = priv->mdev;
        en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

        buf_ring_free(ring->br, M_DEVBUF);
        if (ring->bf_enabled)
                mlx4_bf_free(mdev->dev, &ring->bf);
        mlx4_qp_remove(mdev->dev, &ring->qp);
        mlx4_qp_free(mdev->dev, &ring->qp);
        mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
        mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        kfree(ring->bounce_buf);
        ring->bounce_buf = NULL;
        kfree(ring->tx_info);
        ring->tx_info = NULL;
        mtx_destroy(&ring->tx_lock.m);
        mtx_destroy(&ring->comp_lock.m);
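
/*
 * Bring a ring into service: reset the ring indexes, build the QP context
 * for this ring/CQ pair and move the QP to the ready state.
 */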
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring,
        struct mlx4_en_dev *mdev = priv->mdev;

        ring->cons = 0xffffffff;
        ring->last_nr_txbb = 1;

        memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
        memset(ring->buf, 0, ring->buf_size);

        ring->qp_state = MLX4_QP_STATE_RST;
        ring->doorbell_qpn = swab32(ring->qp.qpn << 8);

        mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
            ring->cqn, &ring->context);
        if (ring->bf_enabled)
                ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
            &ring->qp, &ring->qp_state);
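
/* Take the ring out of service by moving its QP back to the RESET state. */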
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring)
        struct mlx4_en_dev *mdev = priv->mdev;

        mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
            MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
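
/*
 * Release one completed (or aborted) descriptor: unmap the DMA segments
 * that were attached to it and stamp every freed TXBB so that software
 * ownership can be recognized later.  The stamp encodes the current owner
 * bit, cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)), and the slow
 * path below handles descriptors that wrap past the end of the ring buffer.
 */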
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring,
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
        struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
        struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
        struct mbuf *mb = tx_info->mb;
        void *end = ring->buf + ring->buf_size;
        int frags = tx_info->nr_segs;
        __be32 *ptr = (__be32 *)tx_desc;
        __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

        /* Optimize the common case when there are no wraparounds */
        if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
                for (i = 0; i < frags; i++) {
                        pci_unmap_single(mdev->pdev,
                            (dma_addr_t) be64_to_cpu(data[i].addr),
                            data[i].byte_count, PCI_DMA_TODEVICE);

                /* Stamp the freed descriptor */
                for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {

                for (i = 0; i < frags; i++) {
                        /* Check for wraparound before unmapping */
                        if ((void *) data >= end)
                                data = (struct mlx4_wqe_data_seg *) ring->buf;
                        pci_unmap_single(mdev->pdev,
                            (dma_addr_t) be64_to_cpu(data->addr),
                            data->byte_count, PCI_DMA_TODEVICE);

                /* Stamp the freed descriptor */
                for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
                        if ((void *) ptr >= end) {
                                stamp ^= cpu_to_be32(0x80000000);

        return tx_info->nr_txbb;
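
/*
 * Drain every descriptor still outstanding on the ring, e.g. when the port
 * is being stopped and completions will no longer arrive.
 */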
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
        struct mlx4_en_priv *priv = netdev_priv(dev);

        /* Skip last polled descriptor */
        ring->cons += ring->last_nr_txbb;
        en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
            ring->cons, ring->prod);

        if ((u32) (ring->prod - ring->cons) > ring->size) {
                en_warn(priv, "Tx consumer passed producer!\n");

        while (ring->cons != ring->prod) {
                ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
                    ring->cons & ring->size_mask,
                    !!(ring->cons & ring->size));
                ring->cons += ring->last_nr_txbb;

        en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
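
/*
 * Spread the 8 VLAN priorities over ring_num rings: each ring gets 'block'
 * consecutive priorities and the first 'extra' rings get one more.  For
 * example, with ring_num = 3: block = 8 / 3 = 2 and extra = 8 - 2 * 3 = 2,
 * so two rings serve 3 priorities each and the last ring serves 2.
 */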
void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
        int block = 8 / ring_num;
        int extra = 8 - (block * ring_num);

        for (prio = 0; prio < 8; prio++)

        for (prio = 0; prio < 8; prio++) {
                if (extra && (num == block + 1)) {
                } else if (!extra && (num == block)) {
                prio_map[prio] = ring;
                en_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
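
/*
 * Reap TX completions for one ring: walk the CQEs the HW has produced,
 * free the corresponding descriptors, advance the CQ and ring consumer
 * indexes, and reopen the queue if it had been stopped for lack of room.
 */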
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
        struct mlx4_cqe *cqe = cq->buf;
        u32 txbbs_skipped = 0;

        /* index always points to the first TXBB of the last polled descriptor */
        index = ring->cons & ring->size_mask;
        new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
        if (index == new_index)

        /*
         * We use a two-stage loop:
         * - the first samples the HW-updated CQE
         * - the second frees TXBBs until the last sample
         * This lets us amortize CQE cache misses, while still polling the CQ
         * until it is quiescent.
         */
        cq_last_sav = mcq->cons_index;
                /* Skip over last polled CQE */
                index = (index + ring->last_nr_txbb) & ring->size_mask;
                txbbs_skipped += ring->last_nr_txbb;
                        ring->last_nr_txbb = mlx4_en_free_tx_desc(
                            !!((ring->cons + txbbs_skipped) &
                } while (index != new_index);
                new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
        } while (index != new_index);
        AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
            (u32) (mcq->cons_index - cq_last_sav));

        /*
         * To prevent CQ overflow we first update the CQ consumer and only then
         * set the ring consumer.
         */
        ring->cons += txbbs_skipped;

        /* Wake up the Tx queue if this ring stopped it */
        if (unlikely(ring->blocked)) {
                if ((u32) (ring->prod - ring->cons) <=
                    ring->size - HEADROOM - MAX_DESC_TXBBS) {
                        if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
                                atomic_clear_int(&dev->if_drv_flags,
                        priv->port_stats.wake_queue++;
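
/* TX completion interrupt handler; if the completion lock is already taken,
 * processing is left to the CQ timer. */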
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

        if (!spin_trylock(&ring->comp_lock))
        mlx4_en_process_tx_cq(cq->dev, cq);
        mod_timer(&cq->timer, jiffies + 1);
        spin_unlock(&ring->comp_lock);
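
/* Timer callback that polls the TX CQ and re-arms itself while packets are
 * still in flight. */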
void mlx4_en_poll_tx_cq(unsigned long data)
        struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

        INC_PERF_COUNTER(priv->pstats.tx_poll);

        if (!spin_trylock(&ring->comp_lock)) {
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
        mlx4_en_process_tx_cq(cq->dev, cq);
        inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

        /* If there are still packets in flight and the timer has not already
         * been scheduled by the Tx routine then schedule it here to guarantee
         * completion processing of these packets */
        if (inflight && priv->port_up)
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

        spin_unlock(&ring->comp_lock);
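
/*
 * Copy a descriptor that was built in the bounce buffer back into the ring:
 * first the part that wrapped to the start of the buffer, then the tail at
 * the original index, one 32-bit word at a time.
 */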
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
                                                      struct mlx4_en_tx_ring *ring,
                                                      unsigned int desc_size)
        u32 copy = (ring->size - index) * TXBB_SIZE;

        for (i = desc_size - copy - 4; i >= 0; i -= 4) {
                if ((i & (TXBB_SIZE - 1)) == 0)
                *((u32 *) (ring->buf + i)) =
                    *((u32 *) (ring->bounce_buf + copy + i));

        for (i = copy - 4; i >= 4; i -= 4) {
                if ((i & (TXBB_SIZE - 1)) == 0)
                *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
                    *((u32 *) (ring->bounce_buf + i));

        /* Return real descriptor location */
        return ring->buf + index * TXBB_SIZE;
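
/* Opportunistically reap completions from the transmit path and make sure a
 * timer is pending so completions are not lost if the interface goes idle. */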
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
        struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];

        /* If we don't have a pending timer, set one up to catch our recent
           post in case the interface becomes idle */
        if (!timer_pending(&cq->timer))
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

        /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
        if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
                if (spin_trylock(&ring->comp_lock)) {
                        mlx4_en_process_tx_cq(priv->dev, cq);
                        spin_unlock(&ring->comp_lock);
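
/* A packet is inlined only if it fits under inline_thold and is not a TSO
 * packet. */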
static int is_inline(struct mbuf *mb)
        if (inline_thold && mb->m_pkthdr.len <= inline_thold &&
            (mb->m_pkthdr.csum_flags & CSUM_TSO) == 0)

static int inline_size(struct mbuf *mb)
        len = mb->m_pkthdr.len;
        if (len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
            <= MLX4_INLINE_ALIGN)
                return ALIGN(len + CTRL_SIZE +
                    sizeof(struct mlx4_wqe_inline_seg), 16);
        return ALIGN(len + CTRL_SIZE + 2 *
            sizeof(struct mlx4_wqe_inline_seg), 16);
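
/*
 * For TSO, return the combined header length through the TCP header, or 0
 * if the packet is not TCP/IP or the headers do not fit in the first mbuf.
 */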
static int get_head_size(struct mbuf *mb)
        int ip_hlen, tcp_hlen;

        if (mb->m_len < len + sizeof(struct ip))
        ip = (struct ip *)(mtod(mb, char *) + len);
        if (ip->ip_p != IPPROTO_TCP)
        ip_hlen = ip->ip_hl << 2;
        if (mb->m_len < len + sizeof(struct tcphdr))
        th = (struct tcphdr *)(mtod(mb, char *) + len);
        tcp_hlen = th->th_off << 2;
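
/*
 * Compute the WQE size needed for this mbuf chain: a ctrl segment plus one
 * data segment per mbuf fragment, with an additional DS_SIZE-aligned LSO
 * header area for TSO packets; packets that will be inlined are sized by
 * inline_size() instead.
 */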
static int get_real_size(struct mbuf *mb, struct net_device *dev, int *segsp,
                         int *lso_header_size)
        for (m = mb; m != NULL; m = m->m_next)

        if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
                *lso_header_size = get_head_size(mb);
                if (*lso_header_size) {
                        if (mb->m_len == *lso_header_size)
                        return CTRL_SIZE + nr_segs * DS_SIZE +
                            ALIGN(*lso_header_size + 4, DS_SIZE);
                *lso_header_size = 0;
                return inline_size(mb);
        return (CTRL_SIZE + nr_segs * DS_SIZE);
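
/* Copy 'len' bytes out of the mbuf chain starting at offset *offp, advancing
 * to the next mbuf as each one is exhausted. */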
static struct mbuf *mb_copy(struct mbuf *mb, int *offp, char *data, int len)
        bytes = min(mb->m_len - off, len);
        memcpy(data, mb->m_data + off, bytes);
        if (off == mb->m_len) {

static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct mbuf *mb,
                             int real_size, u16 *vlan_tag, int tx_ind)
        struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
        int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

        len = mb->m_pkthdr.len;

                inl->byte_count = cpu_to_be32(1 << 31 | len);
                mb_copy(mb, &off, (void *)(inl + 1), len);

                inl->byte_count = cpu_to_be32(1 << 31 | spc);
                mb = mb_copy(mb, &off, (void *)(inl + 1), spc);
                inl = (void *) (inl + 1) + spc;
                mb_copy(mb, &off, (void *)(inl + 1), len - spc);
                inl->byte_count = cpu_to_be32(1 << 31 | (len - spc));

        tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
        tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
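
/*
 * Pick a TX ring for an outgoing mbuf.  VLAN-tagged frames are mapped by
 * the 3-bit VLAN priority through tx_prio_map; plain TCP/IP frames are
 * hashed on the destination address and ports into tx_hash, and flows are
 * steered to the lower or upper half of the hash rings depending on whether
 * they have been carrying mostly small or large packets.  Everything else
 * falls back to ring MLX4_EN_NUM_HASH_RINGS.
 */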
u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_tx_hash_entry *entry;
        struct ether_header *eth;

        /* Obtain VLAN information if present */
        if (mb->m_flags & M_VLANTAG) {
                vlan_tag = mb->m_pkthdr.ether_vtag;
                /* Set the Tx ring to use according to VLAN priority */
                tx_ind = priv->tx_prio_map[vlan_tag >> 13];

            ETHER_HDR_LEN + sizeof(struct ip) + sizeof(struct tcphdr))
                return MLX4_EN_NUM_HASH_RINGS;
        eth = mtod(mb, struct ether_header *);
        /* Hashing is only done for TCP/IP or UDP/IP packets */
        if (be16_to_cpu(eth->ether_type) != ETHERTYPE_IP)
                return MLX4_EN_NUM_HASH_RINGS;

        iph = (struct ip *)(mtod(mb, char *) + len);
        len += iph->ip_hl << 2;
        th = (struct tcphdr *)(mtod(mb, char *) + len);
        hash_index = be32_to_cpu(iph->ip_dst.s_addr) & MLX4_EN_TX_HASH_MASK;
                if (mb->m_len < len + sizeof(struct tcphdr))
                        return MLX4_EN_NUM_HASH_RINGS;
                    (hash_index ^ be16_to_cpu(th->th_dport ^ th->th_sport)) &
                    MLX4_EN_TX_HASH_MASK;
                return MLX4_EN_NUM_HASH_RINGS;

        entry = &priv->tx_hash[hash_index];
        if (unlikely(!entry->cnt)) {
                tx_ind = hash_index & (MLX4_EN_NUM_HASH_RINGS / 2 - 1);
                if (2 * entry->small_pkts > entry->big_pkts)
                        tx_ind += MLX4_EN_NUM_HASH_RINGS / 2;
                entry->small_pkts = entry->big_pkts = 0;
                entry->ring = tx_ind;

        if (mb->m_pkthdr.len > MLX4_EN_SMALL_PKT_SIZE)
static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
        __iowrite64_copy(dst, src, bytecnt / 8);
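
/*
 * Build and post one send WQE.  The descriptor (ctrl segment, optional LSO
 * header, data or inline segments) is assembled in the ring, or in the
 * bounce buffer when it would wrap; ownership is then handed to hardware
 * and the doorbell is rung either through the BlueFlame register (small,
 * untagged descriptors copied with mlx4_bf_copy()) or via the regular
 * MLX4_SEND_DOORBELL write.
 */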
static int mlx4_en_xmit(struct net_device *dev, int tx_ind, struct mbuf **mbp)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_ring *ring;
        struct mlx4_en_cq *cq;
        struct mlx4_en_tx_desc *tx_desc;
        struct mlx4_wqe_data_seg *data;
        struct mlx4_en_tx_info *tx_info;

        ring = &priv->tx_ring[tx_ind];

        real_size = get_real_size(mb, dev, &nr_segs, &lso_header_size);
        if (unlikely(!real_size))

        /* Align the descriptor to TXBB size */
        desc_size = ALIGN(real_size, TXBB_SIZE);
        nr_txbb = desc_size / TXBB_SIZE;
        if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
                mb = m_defrag(*mbp, M_DONTWAIT);

        /* Check available TXBBs and 2K spare for prefetch */
        if (unlikely(((int)(ring->prod - ring->cons)) >
            ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                /* Every full Tx ring stops the queue */
                if (ring->blocked == 0)
                        atomic_add_int(&priv->blocked, 1);
                atomic_set_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
                priv->port_stats.queue_stopped++;

                /* Use interrupts to find out when the queue opens up again */
                cq = &priv->tx_cq[tx_ind];
                mlx4_en_arm_cq(priv, cq);

        /* Track current inflight packets for performance analysis */
        AVG_PERF_COUNTER(priv->pstats.inflight_avg,
            (u32) (ring->prod - ring->cons - 1));

        /* Packet is good - grab an index and transmit it */
        index = ring->prod & ring->size_mask;
        bf_index = ring->prod;

        /* See if we have enough space for the whole descriptor, plus a TXBB
         * for setting SW ownership on the next descriptor; if not, use the
         * bounce buffer. */
        if (likely(index + nr_txbb <= ring->size))
                tx_desc = ring->buf + index * TXBB_SIZE;
                tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;

        /* Prepare the ctrl segment, apart from opcode+ownership which depends
         * on whether LSO is used */
        if (mb->m_flags & M_VLANTAG)
                vlan_tag = mb->m_pkthdr.ether_vtag;
        tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
        tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
        tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
            MLX4_WQE_CTRL_SOLICITED);
        if (mb->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP)) {
                tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
                    MLX4_WQE_CTRL_TCP_UDP_CSUM);
                priv->port_stats.tx_chksum_offload++;
        if (unlikely(priv->validate_loopback)) {
                /* Copy the dst MAC address into the WQE */
                struct ether_header *ethh;

                ethh = mtod(mb, struct ether_header *);
                mac = mlx4_en_mac_to_u64(ethh->ether_dhost);
                mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
                mac_l = (u32) (mac & 0xffffffff);
                tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
                tx_desc->ctrl.imm = cpu_to_be32(mac_l);

        /* Handle LSO (TSO) packets */
        if (lso_header_size) {
                /* Mark opcode as LSO */
                op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
                    ((ring->prod & ring->size) ?
                    cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

                /* Fill in the LSO prefix */
                tx_desc->lso.mss_hdr_size = cpu_to_be32(
                    mb->m_pkthdr.tso_segsz << 16 | lso_header_size);

                /* Copy the packet headers into the LSO prefix;
                 * note that we already verified that it is linear */
                memcpy(tx_desc->lso.header, mb->m_data, lso_header_size);
                data = ((void *) &tx_desc->lso +
                    ALIGN(lso_header_size + 4, DS_SIZE));

                priv->port_stats.tso_packets++;
                segsz = mb->m_pkthdr.tso_segsz;
                i = ((mb->m_pkthdr.len - lso_header_size) / segsz) +
                    !!((mb->m_pkthdr.len - lso_header_size) % segsz);
                ring->bytes += mb->m_pkthdr.len + (i - 1) * lso_header_size;
                mb->m_data += lso_header_size;
                mb->m_len -= lso_header_size;

                /* Normal (non-LSO) packet */
                op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
                    ((ring->prod & ring->size) ?
                    cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
                data = &tx_desc->data;
                ring->bytes += max(mb->m_pkthdr.len,
                    (unsigned int)ETHER_MIN_LEN - ETHER_CRC_LEN);

        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);

        /* Save mb in tx_info ring */
        tx_info = &ring->tx_info[index];
        tx_info->nr_txbb = nr_txbb;
        tx_info->nr_segs = nr_segs;
        /* Valid only for non-inline segments */
        tx_info->data_offset = (void *) data - (void *) tx_desc;

        if (!is_inline(mb)) {
                for (i = 0, m = mb; i < nr_segs; i++, m = m->m_next) {
                        dma = pci_map_single(mdev->dev->pdev, m->m_data,
                            m->m_len, PCI_DMA_TODEVICE);
                        data->addr = cpu_to_be64(dma);
                        data->lkey = cpu_to_be32(mdev->mr.key);
                        data->byte_count = cpu_to_be32(m->m_len);
                if (lso_header_size) {
                        mb->m_data -= lso_header_size;
                        mb->m_len += lso_header_size;
                build_inline_wqe(tx_desc, mb, real_size, &vlan_tag, tx_ind);

        ring->prod += nr_txbb;

        /* If we used a bounce buffer then copy the descriptor back into place */
        tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

        if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
                *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
                op_own |= htonl((bf_index & 0xffff) << 8);
                /* Ensure the new descriptor hits memory
                 * before setting ownership of this descriptor to HW */
                tx_desc->ctrl.owner_opcode = op_own;

                mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,

                ring->bf.offset ^= ring->bf.buf_size;

                /* Ensure the new descriptor hits memory
                 * before setting ownership of this descriptor to HW */
                tx_desc->ctrl.owner_opcode = op_own;

                writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
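
/*
 * Transmit with the ring lock held: enqueue the mbuf on the ring's buf_ring
 * if the interface is not running or the ring is backed up, then drain the
 * buf_ring through mlx4_en_xmit(), re-queueing a packet that could not be
 * sent and tapping BPF for each frame that was.
 */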
mlx4_en_transmit_locked(struct ifnet *dev, int tx_ind, struct mbuf *m)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_tx_ring *ring;
        int enqueued, err = 0;

        ring = &priv->tx_ring[tx_ind];
        if ((dev->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING || priv->port_up == 0) {
                err = drbr_enqueue(dev, ring->br, m);

                next = drbr_dequeue(dev, ring->br);
        } else if (drbr_needs_enqueue(dev, ring->br)) {
                if ((err = drbr_enqueue(dev, ring->br, m)) != 0)
                next = drbr_dequeue(dev, ring->br);

        /* Process the queue */
        while (next != NULL) {
                if ((err = mlx4_en_xmit(dev, tx_ind, &next)) != 0) {
                        err = drbr_enqueue(dev, ring->br, next);
                drbr_stats_update(dev, next->m_pkthdr.len, next->m_flags);
                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(dev, next);
                if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
                next = drbr_dequeue(dev, ring->br);

        ring->watchdog_time = ticks;
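
/*
 * Taskqueue handler used when mlx4_en_transmit() could not take the TX lock:
 * poll for completions and drain any packets waiting on the buf_ring.
 */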
mlx4_en_tx_que(void *context, int pending)
        struct mlx4_en_tx_ring *ring;
        struct mlx4_en_priv *priv;
        struct net_device *dev;
        struct mlx4_en_cq *cq;

        priv = dev->if_softc;
        ring = &priv->tx_ring[tx_ind];
        if (dev->if_drv_flags & IFF_DRV_RUNNING) {
                mlx4_en_xmit_poll(priv, tx_ind);
                spin_lock(&ring->tx_lock);
                if (!drbr_empty(dev, ring->br))
                        mlx4_en_transmit_locked(dev, tx_ind, NULL);
                spin_unlock(&ring->tx_lock);
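
/*
 * if_transmit entry point: choose a ring from the mbuf flowid (or from
 * mlx4_en_select_queue() for VLAN-tagged traffic and when no flowid is set),
 * try to transmit immediately, and otherwise enqueue the mbuf and defer to
 * the per-CQ taskqueue.
 */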
mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_tx_ring *ring;
        struct mlx4_en_cq *cq;

        /* Which queue to use */
        if ((m->m_flags & (M_FLOWID | M_VLANTAG)) == M_FLOWID)
                i = m->m_pkthdr.flowid % (MLX4_EN_NUM_HASH_RINGS - 1);
                i = mlx4_en_select_queue(dev, m);

        ring = &priv->tx_ring[i];

        if (spin_trylock(&ring->tx_lock)) {
                err = mlx4_en_transmit_locked(dev, i, m);
                spin_unlock(&ring->tx_lock);
                mlx4_en_xmit_poll(priv, i);
                err = drbr_enqueue(dev, ring->br, m);
                cq = &priv->tx_cq[i];
                taskqueue_enqueue(cq->tq, &cq->cq_task);
/*
 * Flush ring buffers.
 */
mlx4_en_qflush(struct ifnet *dev)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_tx_ring *ring = priv->tx_ring;

        for (int i = 0; i < priv->tx_ring_num; i++, ring++) {
                spin_lock(&ring->tx_lock);
                while ((m = buf_ring_dequeue_sc(ring->br)) != NULL)
                spin_unlock(&ring->tx_lock);