/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define	LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/page.h>
#include <dev/mlx4/cq.h>
#include <linux/slab.h>
#include <dev/mlx4/qp.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include "en.h"
enum {
        MAX_INLINE = 104,       /* 128 - 16 - 4 - 4 */
        MAX_BF = 256,
        MIN_PKT_LEN = 17,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, uint, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
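
/*
 * With the LINUXKPI_PARAM_PREFIX above, the threshold is exposed as a
 * read-only module parameter named "mlx4_inline_thold".  A sketch of
 * how it would typically be tuned at boot, assuming the usual LinuxKPI
 * parameter-to-sysctl mapping on FreeBSD (the exact tunable path is an
 * assumption, not taken from this file):
 *
 *	compat.linuxkpi.mlx4_inline_thold="64"
 *
 * Each ring later clamps the value to [MIN_PKT_LEN, MAX_INLINE] in
 * mlx4_en_create_tx_ring() below.
 */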
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_tx_ring **pring, u32 size,
                           u16 stride, int node, int queue_idx)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_ring *ring;
        uint32_t x;
        int tmp;
        int err;

        ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
        if (!ring) {
                ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
                if (!ring) {
                        en_err(priv, "Failed allocating TX ring\n");
                        return -ENOMEM;
                }
        }

        /* Create DMA descriptor TAG */
        if ((err = -bus_dma_tag_create(
            bus_get_dma_tag(mdev->pdev->dev.bsddev),
            1,                                  /* any alignment */
            0,                                  /* no boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            MLX4_EN_TX_MAX_PAYLOAD_SIZE,        /* maxsize */
            MLX4_EN_TX_MAX_MBUF_FRAGS,          /* nsegments */
            MLX4_EN_TX_MAX_MBUF_SIZE,           /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockfuncarg */
            &ring->dma_tag)))
                goto done;

        ring->size = size;
        ring->size_mask = size - 1;
        ring->stride = stride;
        ring->inline_thold = MAX(MIN_PKT_LEN, MIN(inline_thold, MAX_INLINE));
        mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
        mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

        /* Allocate the buf ring */
        ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
            M_WAITOK, &ring->tx_lock.m);
        if (ring->br == NULL) {
                en_err(priv, "Failed allocating tx_info ring\n");
                err = -ENOMEM;
                goto err_free_dma_tag;
        }

        tmp = size * sizeof(struct mlx4_en_tx_info);
        ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
        if (!ring->tx_info) {
                ring->tx_info = kzalloc(tmp, GFP_KERNEL);
                if (!ring->tx_info) {
                        err = -ENOMEM;
                        goto err_ring;
                }
        }

        /* Create DMA descriptor MAPs */
        for (x = 0; x != size; x++) {
                err = -bus_dmamap_create(ring->dma_tag, 0,
                    &ring->tx_info[x].dma_map);
                if (err != 0) {
                        while (x--) {
                                bus_dmamap_destroy(ring->dma_tag,
                                    ring->tx_info[x].dma_map);
                        }
                        goto err_info;
                }
        }

        en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
            ring->tx_info, tmp);

        ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

        /* Allocate HW buffers on provided NUMA node */
        err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
            2 * PAGE_SIZE);
        if (err) {
                en_err(priv, "Failed allocating hwq resources\n");
                goto err_dma_map;
        }

        err = mlx4_en_map_buffer(&ring->wqres.buf);
        if (err) {
                en_err(priv, "Failed to map TX buffer\n");
                goto err_hwq_res;
        }

        ring->buf = ring->wqres.buf.direct.buf;

        en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
            "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
            ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

        err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
            MLX4_RESERVE_ETH_BF_QP);
        if (err) {
                en_err(priv, "failed reserving qp for TX ring\n");
                goto err_map;
        }

        err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
        if (err) {
                en_err(priv, "Failed allocating qp %d\n", ring->qpn);
                goto err_reserve;
        }
        ring->qp.event = mlx4_en_sqp_event;

        err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
        if (err) {
                en_dbg(DRV, priv, "working without blueflame (%d)", err);
                ring->bf.uar = &mdev->priv_uar;
                ring->bf.uar->map = mdev->uar_map;
                ring->bf_enabled = false;
        } else
                ring->bf_enabled = true;
        ring->queue_index = queue_idx;
        if (queue_idx < priv->num_tx_rings_p_up)
                CPU_SET(queue_idx, &ring->affinity_mask);

        *pring = ring;
        return 0;

err_reserve:
        mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
        mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
        for (x = 0; x != size; x++)
                bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
err_info:
        vfree(ring->tx_info);
err_ring:
        buf_ring_free(ring->br, M_DEVBUF);
err_free_dma_tag:
        bus_dma_tag_destroy(ring->dma_tag);
done:
        kfree(ring);
        return err;
}
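
/*
 * A minimal usage sketch for the function above (names such as "prof"
 * and "numa_node" are illustrative, not defined in this file).  The
 * ring size must be a power of two because indices are masked with
 * size - 1:
 *
 *	err = mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
 *	    prof->tx_ring_size, TXBB_SIZE, numa_node, i);
 *
 * The matching teardown is mlx4_en_destroy_tx_ring() below, which
 * releases the resources in roughly the reverse order of allocation.
 */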
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring **pring)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_ring *ring = *pring;
        uint32_t x;

        en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

        buf_ring_free(ring->br, M_DEVBUF);
        if (ring->bf_enabled)
                mlx4_bf_free(mdev->dev, &ring->bf);
        mlx4_qp_remove(mdev->dev, &ring->qp);
        mlx4_qp_free(mdev->dev, &ring->qp);
        mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
        mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        for (x = 0; x != ring->size; x++)
                bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
        vfree(ring->tx_info);
        mtx_destroy(&ring->tx_lock.m);
        mtx_destroy(&ring->comp_lock.m);
        bus_dma_tag_destroy(ring->dma_tag);
        kfree(ring);
        *pring = NULL;
}
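
/*
 * Activating a ring drives its QP from reset to the ready-to-send
 * state via mlx4_qp_to_ready().  The consumer index starts at
 * 0xffffffff so that the first completion pass, which always skips
 * "last_nr_txbb" descriptors, lands on index 0.
 */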
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring,
                             int cq, int user_prio)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;

        ring->cqn = cq;
        ring->prod = 0;
        ring->cons = 0xffffffff;
        ring->last_nr_txbb = 1;
        ring->poll_cnt = 0;
        memset(ring->buf, 0, ring->buf_size);

        ring->qp_state = MLX4_QP_STATE_RST;
        ring->doorbell_qpn = ring->qp.qpn << 8;

        mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
            ring->cqn, user_prio, &ring->context);
        if (ring->bf_enabled)
                ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
            &ring->qp, &ring->qp_state);
        return err;
}
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_dev *mdev = priv->mdev;

        mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
            MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
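
/*
 * The helpers below copy small headers or whole small frames directly
 * into the WQE instead of DMA-mapping them.  An inline segment carries
 * a 4-byte byte-count word before the data, and the data segment
 * pointer advances in units of DS_SIZE_ALIGNMENT.  For example, with
 * 16-byte data segments, inlining a 54-byte header consumes
 * DIV_ROUND_UP(4 + 54, 16) = 4 data-segment slots.
 */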
static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_lso_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
        uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);

        /* copy data into place */
        m_copydata(mb, 0, len, inl + 4);
        dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
        return (dseg);
}
static void
mlx4_en_store_inline_lso_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
}
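
/*
 * Completed WQEs are "stamped" so that the hardware ownership bit of
 * every TXBB flips consistently on each ring wraparound: STAMP_VAL is
 * written every STAMP_STRIDE bytes with the current owner bit shifted
 * into the top of the dword.  Without stamping, stale descriptors
 * could appear hardware-owned after the producer index wraps.
 */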
static void
mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index, u8 owner)
{
        struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
        struct mlx4_en_tx_desc *tx_desc = (struct mlx4_en_tx_desc *)
            (ring->buf + (index * TXBB_SIZE));
        volatile __be32 *ptr = (__be32 *)tx_desc;
        const __be32 stamp = cpu_to_be32(STAMP_VAL |
            ((u32)owner << STAMP_SHIFT));
        u32 i;

        /* Stamp the freed descriptor */
        for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
                *ptr = stamp;
                ptr += STAMP_DWORDS;
        }
}
static u32
mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index)
{
        struct mlx4_en_tx_info *tx_info;
        struct mbuf *mb;

        tx_info = &ring->tx_info[index];
        mb = tx_info->mb;

        if (mb == NULL)
                goto done;

        bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);

        m_freem(mb);
done:
        return (tx_info->nr_txbb);
}
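
/*
 * mlx4_en_free_tx_buf() reclaims descriptors that were posted but
 * never completed, typically while the port is being brought down.
 * It walks from the consumer to the producer index, unmapping and
 * freeing each mbuf, and returns the number of descriptors freed.
 */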
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int cnt = 0;

        /* Skip last polled descriptor */
        ring->cons += ring->last_nr_txbb;
        en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
            ring->cons, ring->prod);

        if ((u32) (ring->prod - ring->cons) > ring->size) {
                en_warn(priv, "Tx consumer passed producer!\n");
                return 0;
        }

        while (ring->cons != ring->prod) {
                ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
                    ring->cons & ring->size_mask);
                ring->cons += ring->last_nr_txbb;
                cnt++;
        }

        if (cnt)
                en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

        return cnt;
}
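
/*
 * The ring counts as full when fewer work queue slots remain than
 * HEADROOM plus two maximum-sized WQEs.  The extra headroom leaves
 * room for the wraparound padding logic in mlx4_en_xmit(): a worst
 * case request may burn up to MLX4_EN_TX_WQE_MAX_WQEBBS - 1 TXBBs in
 * zero-length padding segments before the real WQE is written.
 */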
static bool
mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring)
{
        int wqs;

        wqs = ring->size - (ring->prod - ring->cons);
        return (wqs < (HEADROOM + (2 * MLX4_EN_TX_WQE_MAX_WQEBBS)));
}
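
/*
 * CQE ownership is tested with XNOR(owner bit, cons_index & size):
 * the hardware toggles the owner bit every time it wraps the CQ, so
 * a CQE belongs to software exactly when its owner bit matches the
 * wrap parity of the software consumer index.  Only after they agree
 * may the CQE contents be read, hence the read barrier below.
 */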
static int mlx4_en_process_tx_cq(struct net_device *dev,
    struct mlx4_en_cq *cq)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
        struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
        struct mlx4_cqe *cqe;
        u16 index;
        u16 new_index, ring_index, stamp_index;
        u32 txbbs_skipped = 0;
        u32 txbbs_stamp = 0;
        u32 cons_index = mcq->cons_index;
        int size = cq->size;
        u32 size_mask = ring->size_mask;
        struct mlx4_cqe *buf = cq->buf;
        int factor = priv->cqe_factor;

        if (!priv->port_up)
                return 0;

        index = cons_index & size_mask;
        cqe = &buf[(index << factor) + factor];
        ring_index = ring->cons & size_mask;
        stamp_index = ring_index;

        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                        cons_index & size)) {
                /*
                 * make sure we read the CQE after we read the
                 * ownership bit
                 */
                rmb();

                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                             MLX4_CQE_OPCODE_ERROR)) {
                        en_err(priv, "CQE completed in error - vendor syndrome: 0x%x syndrome: 0x%x\n",
                               ((struct mlx4_err_cqe *)cqe)->
                                       vendor_err_syndrome,
                               ((struct mlx4_err_cqe *)cqe)->syndrome);
                }

                /* Skip over last polled CQE */
                new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

                do {
                        txbbs_skipped += ring->last_nr_txbb;
                        ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
                        /* free next descriptor */
                        ring->last_nr_txbb = mlx4_en_free_tx_desc(
                            priv, ring, ring_index);
                        mlx4_en_stamp_wqe(priv, ring, stamp_index,
                            !!((ring->cons + txbbs_stamp) &
                                ring->size));
                        stamp_index = ring_index;
                        txbbs_stamp = txbbs_skipped;
                } while (ring_index != new_index);

                ++cons_index;
                index = cons_index & size_mask;
                cqe = &buf[(index << factor) + factor];
        }

        /*
         * To prevent CQ overflow we first update CQ consumer and only then
         * the ring consumer.
         */
        mcq->cons_index = cons_index;
        mlx4_cq_set_ci(mcq);
        wmb();
        ring->cons += txbbs_skipped;

        /* Wakeup Tx queue if it was stopped and ring is not full */
        if (unlikely(ring->blocked) && !mlx4_en_tx_ring_is_full(ring)) {
                ring->blocked = 0;
                if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
                        atomic_clear_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
                priv->port_stats.wake_queue++;
        }
        return (0);
}
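
/*
 * Completion processing runs from two contexts: the CQ interrupt
 * below and the timer-driven mlx4_en_poll_tx_cq().  Both acquire
 * comp_lock with a trylock; whoever loses simply re-arms the timer,
 * so completions are never lost, only deferred by one tick.
 */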
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
        struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];

        if (priv->port_up == 0 || !spin_trylock(&ring->comp_lock))
                return;
        mlx4_en_process_tx_cq(cq->dev, cq);
        mod_timer(&cq->timer, jiffies + 1);
        spin_unlock(&ring->comp_lock);
}
void mlx4_en_poll_tx_cq(unsigned long data)
{
        struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
        struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
        u32 inflight;

        INC_PERF_COUNTER(priv->pstats.tx_poll);

        if (priv->port_up == 0)
                return;
        if (!spin_trylock(&ring->comp_lock)) {
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
                return;
        }
        mlx4_en_process_tx_cq(cq->dev, cq);
        inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

        /* If there are still packets in flight and the timer has not already
         * been scheduled by the Tx routine then schedule it here to guarantee
         * completion processing of these packets */
        if (inflight && priv->port_up)
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

        spin_unlock(&ring->comp_lock);
}
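
/*
 * mlx4_en_xmit_poll() opportunistically reaps completions from the
 * transmit path itself.  The "& (MLX4_EN_TX_POLL_MODER - 1)" test
 * assumes MLX4_EN_TX_POLL_MODER is a power of two, so the CQ gets
 * polled once every MLX4_EN_TX_POLL_MODER posted packets.
 */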
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
        struct mlx4_en_cq *cq = priv->tx_cq[tx_ind];
        struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];

        if (priv->port_up == 0)
                return;

        /* If we don't have a pending timer, set one up to catch our recent
           post in case the interface becomes idle */
        if (!timer_pending(&cq->timer))
                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

        /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
        if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
                if (spin_trylock(&ring->comp_lock)) {
                        mlx4_en_process_tx_cq(priv->dev, cq);
                        spin_unlock(&ring->comp_lock);
                }
}
static u16
mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb)
{
        u16 retval;

        /* only copy from first fragment, if possible */
        retval = MIN(ring->inline_thold, mb->m_len);

        /* check for too little data */
        if (unlikely(retval < MIN_PKT_LEN))
                retval = MIN(ring->inline_thold, mb->m_pkthdr.len);
        return (retval);
}
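
/*
 * For TSO, the hardware needs the exact Ethernet + IP/IPv6 + TCP
 * header length so it can replicate the headers on every segment.
 * mlx4_en_get_header_size() parses those headers from the first mbuf
 * and returns 0 whenever a header is truncated or the payload is not
 * TCP.
 */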
static int
mlx4_en_get_header_size(struct mbuf *mb)
{
        struct ether_vlan_header *eh;
        struct tcphdr *th;
        struct ip *ip;
        int ip_hlen, tcp_hlen;
        struct ip6_hdr *ip6;
        uint16_t eth_type;
        int eth_hdr_len;

        eh = mtod(mb, struct ether_vlan_header *);
        if (mb->m_len < ETHER_HDR_LEN)
                return (0);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                eth_type = ntohs(eh->evl_proto);
                eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        } else {
                eth_type = ntohs(eh->evl_encap_proto);
                eth_hdr_len = ETHER_HDR_LEN;
        }
        if (mb->m_len < eth_hdr_len)
                return (0);
        switch (eth_type) {
        case ETHERTYPE_IP:
                ip = (struct ip *)(mb->m_data + eth_hdr_len);
                if (mb->m_len < eth_hdr_len + sizeof(*ip))
                        return (0);
                if (ip->ip_p != IPPROTO_TCP)
                        return (0);
                ip_hlen = ip->ip_hl << 2;
                eth_hdr_len += ip_hlen;
                break;
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
                if (mb->m_len < eth_hdr_len + sizeof(*ip6))
                        return (0);
                if (ip6->ip6_nxt != IPPROTO_TCP)
                        return (0);
                eth_hdr_len += sizeof(*ip6);
                break;
        default:
                return (0);
        }
        if (mb->m_len < eth_hdr_len + sizeof(*th))
                return (0);
        th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
        tcp_hlen = th->th_off << 2;
        eth_hdr_len += tcp_hlen;
        if (mb->m_len < eth_hdr_len)
                return (0);
        return (eth_hdr_len);
}
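
/*
 * An inline blob larger than "spc" bytes must be split in two chunks,
 * because the first chunk has to stay inside one MLX4_INLINE_ALIGN
 * span of the control segment.  As a worked example, assuming
 * MLX4_INLINE_ALIGN = 64 and CTRL_SIZE = 16, spc = 64 - 16 - 4 = 44:
 * a 60-byte header is stored as a 44-byte chunk plus a 16-byte chunk,
 * each prefixed by its own 4-byte byte-count word, which is filled in
 * later by mlx4_en_store_inline_header().
 */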
static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
        uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
        const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

        if (unlikely(len < MIN_PKT_LEN)) {
                m_copydata(mb, 0, len, inl + 4);
                memset(inl + 4 + len, 0, MIN_PKT_LEN - len);
                dseg += DIV_ROUND_UP(4 + MIN_PKT_LEN, DS_SIZE_ALIGNMENT);
        } else if (len <= spc) {
                m_copydata(mb, 0, len, inl + 4);
                dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
        } else {
                m_copydata(mb, 0, spc, inl + 4);
                m_copydata(mb, spc, len - spc, inl + 8 + spc);
                dseg += DIV_ROUND_UP(8 + len, DS_SIZE_ALIGNMENT);
        }
        return (dseg);
}
static void
mlx4_en_store_inline_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
        uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
        const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

        if (unlikely(len < MIN_PKT_LEN)) {
                *(volatile uint32_t *)inl =
                    SET_BYTE_COUNT((1 << 31) | MIN_PKT_LEN);
        } else if (len <= spc) {
                *(volatile uint32_t *)inl =
                    SET_BYTE_COUNT((1 << 31) | len);
        } else {
                *(volatile uint32_t *)(inl + 4 + spc) =
                    SET_BYTE_COUNT((1 << 31) | (len - spc));
                wmb();
                *(volatile uint32_t *)inl =
                    SET_BYTE_COUNT((1 << 31) | spc);
        }
}
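
/*
 * Transmit queue selection hashes the L3/L4 headers with a random
 * seed taken once at boot.  A flow hashing to index h, with
 * rings_p_up rings per user priority and VLAN priority "up", lands
 * on ring (h % rings_p_up) + up * rings_p_up, so each priority owns
 * a contiguous block of rings.
 */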
static uint32_t hashrandom;
static void hashrandom_init(void *arg)
{
        /*
         * It is assumed that the random subsystem has been
         * initialized when this function is called:
         */
        hashrandom = m_ether_tcpip_hash_init();
}
SYSINIT(hashrandom_init, SI_SUB_RANDOM, SI_ORDER_ANY, &hashrandom_init, NULL);
u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        u32 rings_p_up = priv->num_tx_rings_p_up;
        u32 up = 0;
        u32 queue_index;

#if (MLX4_EN_NUM_UP > 1)
        /* Obtain VLAN information if present */
        if (mb->m_flags & M_VLANTAG) {
                u32 vlan_tag = mb->m_pkthdr.ether_vtag;
                up = (vlan_tag >> 13) % MLX4_EN_NUM_UP;
        }
#endif
        queue_index = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4, mb, hashrandom);

        return ((queue_index % rings_p_up) + (up * rings_p_up));
}
static void mlx4_bf_copy(void __iomem *dst, volatile unsigned long *src, unsigned bytecnt)
{
        __iowrite64_copy(dst, __DEVOLATILE(void *, src), bytecnt / 8);
}

static u64 mlx4_en_mac_to_u64(u8 *addr)
{
        u64 mac = 0;
        int i;

        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                mac <<= 8;
                mac |= addr[i];
        }
        return mac;
}
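
/*
 * mlx4_en_xmit() builds one work queue entry: a control segment, then
 * either an LSO segment with the inlined headers or a plain inline
 * copy of the start of the frame, then one data segment per DMA
 * scatter/gather entry.  The data segments are filled in reverse
 * order and the inline byte-count words are written last, so the
 * hardware never observes a partially built WQE once the owner bit
 * is flipped.
 */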
static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp)
{
        enum {
                DS_FACT = TXBB_SIZE / DS_SIZE_ALIGNMENT,
                CTRL_FLAGS = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
                    MLX4_WQE_CTRL_SOLICITED),
        };
        bus_dma_segment_t segs[MLX4_EN_TX_MAX_MBUF_FRAGS];
        volatile struct mlx4_wqe_data_seg *dseg;
        volatile struct mlx4_wqe_data_seg *dseg_inline;
        volatile struct mlx4_en_tx_desc *tx_desc;
        struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
        struct ifnet *ifp = priv->dev;
        struct mlx4_en_tx_info *tx_info;
        struct mbuf *mb = *mbp;
        struct mbuf *m;
        __be32 owner_bit;
        int nr_segs;
        int pad;
        int err;
        u32 bf_size;
        u32 bf_prod;
        u32 opcode;
        u16 index;
        u16 ds_cnt;
        u16 ihs;

        if (unlikely(!priv->port_up)) {
                err = EINVAL;
                goto tx_drop;
        }

        /* check if TX ring is full */
        if (unlikely(mlx4_en_tx_ring_is_full(ring))) {
                /* every full native Tx ring stops queue */
                if (ring->blocked == 0)
                        atomic_add_int(&priv->blocked, 1);
                /* Set HW-queue-is-full flag */
                atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
                priv->port_stats.queue_stopped++;
                ring->blocked = 1;
                ring->queue_stopped++;

                /* Use interrupts to find out when queue opened */
                mlx4_en_arm_cq(priv, priv->tx_cq[tx_ind]);
                return (ENOBUFS);
        }

        /* sanity check we are not wrapping around */
        KASSERT(((~ring->prod) & ring->size_mask) >=
            (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring"));

        /* Track current inflight packets for performance analysis */
        AVG_PERF_COUNTER(priv->pstats.inflight_avg,
            (u32) (ring->prod - ring->cons - 1));

        /* Track current mbuf packet header length */
        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);

        /* Grab an index and try to transmit packet */
        owner_bit = (ring->prod & ring->size) ?
            cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0;
        index = ring->prod & ring->size_mask;
        tx_desc = (volatile struct mlx4_en_tx_desc *)
            (ring->buf + index * TXBB_SIZE);
        tx_info = &ring->tx_info[index];
        dseg = &tx_desc->data;

        /* send a copy of the frame to the BPF listener, if any */
        if (ifp != NULL && ifp->if_bpf != NULL)
                ETHER_BPF_MTAP(ifp, mb);

        /* get default flags */
        tx_desc->ctrl.srcrb_flags = CTRL_FLAGS;

        if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
                tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);

        if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
                tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM);

        /* do statistics */
        if (likely(tx_desc->ctrl.srcrb_flags != CTRL_FLAGS)) {
                priv->port_stats.tx_chksum_offload++;
                ring->tx_csum++;
        }

        /* check for VLAN tag */
        if (mb->m_flags & M_VLANTAG) {
                tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag);
                tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN;
        } else {
                tx_desc->ctrl.vlan_tag = 0;
                tx_desc->ctrl.ins_vlan = 0;
        }

        /* clear immediate field */
        tx_desc->ctrl.imm = 0;

        /* Handle LSO (TSO) packets */
        if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
                u32 payload_len;
                u32 mss = mb->m_pkthdr.tso_segsz;
                u32 num_pkts;

                opcode = cpu_to_be32(MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR) |
                    owner_bit;
                ihs = mlx4_en_get_header_size(mb);
                if (unlikely(ihs > MAX_INLINE)) {
                        ring->oversized_packets++;
                        err = EINVAL;
                        goto tx_drop;
                }
                tx_desc->lso.mss_hdr_size = cpu_to_be32((mss << 16) | ihs);
                payload_len = mb->m_pkthdr.len - ihs;
                if (unlikely(payload_len == 0))
                        num_pkts = 1;
                else
                        num_pkts = DIV_ROUND_UP(payload_len, mss);
                ring->bytes += payload_len + (num_pkts * ihs);
                ring->packets += num_pkts;
                priv->port_stats.tso_packets++;
                /* store pointer to inline header */
                dseg_inline = dseg;
                /* copy data inline */
                dseg = mlx4_en_store_inline_lso_data(dseg,
                    mb, ihs, owner_bit);
        } else {
                opcode = cpu_to_be32(MLX4_OPCODE_SEND) |
                    owner_bit;
                ihs = mlx4_en_get_inline_hdr_size(ring, mb);
                ring->bytes += max_t (unsigned int,
                    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
                ring->packets++;
                /* store pointer to inline header */
                dseg_inline = dseg;
                /* copy data inline */
                dseg = mlx4_en_store_inline_data(dseg,
                    mb, ihs, owner_bit);
        }
        m_adj(mb, ihs);

        /* trim off empty mbufs */
        while (mb->m_len == 0) {
                mb = m_free(mb);
                /* check if all data has been inlined */
                if (mb == NULL) {
                        nr_segs = 0;
                        goto skip_dma;
                }
        }

        err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
            mb, segs, &nr_segs, BUS_DMA_NOWAIT);
        if (unlikely(err == EFBIG)) {
                /* Too many mbuf fragments */
                m = m_defrag(mb, M_NOWAIT);
                if (m == NULL) {
                        ring->oversized_packets++;
                        goto tx_drop;
                }
                mb = m;
                /* Try again */
                err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
                    mb, segs, &nr_segs, BUS_DMA_NOWAIT);
        }
        /* catch errors */
        if (unlikely(err != 0)) {
                ring->oversized_packets++;
                goto tx_drop;
        }
        /* make sure all mbuf data is written to RAM */
        bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
            BUS_DMASYNC_PREWRITE);

skip_dma:
        /* compute number of DS needed */
        ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs;

        /*
         * Check if the next request can wrap around and fill the end
         * of the current request with zero immediate data:
         */
        pad = DIV_ROUND_UP(ds_cnt, DS_FACT);
        pad = (~(ring->prod + pad)) & ring->size_mask;

        if (unlikely(pad < (MLX4_EN_TX_WQE_MAX_WQEBBS - 1))) {
                /*
                 * Compute the least number of DS blocks we need to
                 * pad in order to achieve a TX ring wraparound:
                 */
                pad = (DS_FACT * (pad + 1));
        } else {
                /*
                 * The hardware will automatically jump to the next
                 * TXBB. No need for padding.
                 */
                pad = 0;
        }

        /* compute total number of DS blocks */
        ds_cnt += pad;

        /*
         * When modifying this code, please ensure that the following
         * computation is always less than or equal to 0x3F:
         *
         * ((MLX4_EN_TX_WQE_MAX_WQEBBS - 1) * DS_FACT) +
         * (MLX4_EN_TX_WQE_MAX_WQEBBS * DS_FACT)
         *
         * Else the "ds_cnt" variable can become too big.
         */
        tx_desc->ctrl.fence_size = (ds_cnt & 0x3f);

        /* store pointer to mbuf */
        tx_info->mb = mb;
        tx_info->nr_txbb = DIV_ROUND_UP(ds_cnt, DS_FACT);
        bf_size = ds_cnt * DS_SIZE_ALIGNMENT;
        bf_prod = ring->prod;

        /* compute end of "dseg" array */
        dseg += nr_segs + pad;

        /* pad using zero immediate dseg */
        while (pad--) {
                dseg--;
                dseg->addr = 0;
                dseg->lkey = 0;
                wmb();
                dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
        }

        /* fill segment list */
        while (nr_segs--) {
                if (unlikely(segs[nr_segs].ds_len == 0)) {
                        dseg--;
                        dseg->addr = 0;
                        dseg->lkey = 0;
                        wmb();
                        dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
                } else {
                        dseg--;
                        dseg->addr = cpu_to_be64((uint64_t)segs[nr_segs].ds_addr);
                        dseg->lkey = cpu_to_be32(priv->mdev->mr.key);
                        wmb();
                        dseg->byte_count = SET_BYTE_COUNT((uint32_t)segs[nr_segs].ds_len);
                }
        }

        /* write owner bits in reverse order */
        if ((opcode & cpu_to_be32(0x1F)) == cpu_to_be32(MLX4_OPCODE_LSO))
                mlx4_en_store_inline_lso_header(dseg_inline, ihs, owner_bit);
        else
                mlx4_en_store_inline_header(dseg_inline, ihs, owner_bit);

        if (unlikely(priv->validate_loopback)) {
                /* Copy dst mac address to wqe */
                struct ether_header *ethh;
                u64 mac;
                u32 mac_l, mac_h;

                ethh = mtod(mb, struct ether_header *);
                mac = mlx4_en_mac_to_u64(ethh->ether_dhost);
                if (mac) {
                        mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
                        mac_l = (u32) (mac & 0xffffffff);
                        tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
                        tx_desc->ctrl.imm = cpu_to_be32(mac_l);
                }
        }

        /* update producer counter */
        ring->prod += tx_info->nr_txbb;

        if (ring->bf_enabled && bf_size <= MAX_BF &&
            (tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_VLAN)) {

                /* store doorbell number */
                *(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);

                /* or in producer number for this WQE */
                opcode |= cpu_to_be32((bf_prod & 0xffff) << 8);

                /*
                 * Ensure the new descriptor hits memory before
                 * setting ownership of this descriptor to HW:
                 */
                wmb();
                tx_desc->ctrl.owner_opcode = opcode;
                wmb();
                mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset,
                    (volatile unsigned long *) &tx_desc->ctrl, bf_size);
                wmb();
                ring->bf.offset ^= ring->bf.buf_size;
        } else {
                /*
                 * Ensure the new descriptor hits memory before
                 * setting ownership of this descriptor to HW:
                 */
                wmb();
                tx_desc->ctrl.owner_opcode = opcode;
                wmb();
                writel(cpu_to_be32(ring->doorbell_qpn),
                    ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL);
        }

        return (0);
tx_drop:
        *mbp = NULL;
        m_freem(mb);
        return (err);
}
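
/*
 * mlx4_en_transmit_locked() drains the per-ring buf_ring (drbr) while
 * holding tx_lock.  Passing m == NULL simply flushes whatever is
 * already queued; this is how the deferred task in mlx4_en_tx_que()
 * restarts transmission once a full ring opens up again.
 */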
static int
mlx4_en_transmit_locked(struct ifnet *dev, int tx_ind, struct mbuf *m)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_tx_ring *ring;
        struct mbuf *next;
        int enqueued, err = 0;

        ring = priv->tx_ring[tx_ind];
        if ((dev->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING || priv->port_up == 0) {
                if (m != NULL)
                        err = drbr_enqueue(dev, ring->br, m);
                return (err);
        }

        enqueued = 0;
        if (m != NULL)
                /*
                 * If we can't insert mbuf into drbr, try to xmit anyway.
                 * We keep the error we got so we could return that after xmit.
                 */
                err = drbr_enqueue(dev, ring->br, m);

        /* Process the queue */
        while ((next = drbr_peek(dev, ring->br)) != NULL) {
                if (mlx4_en_xmit(priv, tx_ind, &next) != 0) {
                        if (next == NULL) {
                                drbr_advance(dev, ring->br);
                        } else {
                                drbr_putback(dev, ring->br, next);
                        }
                        break;
                }
                drbr_advance(dev, ring->br);
                enqueued++;
                if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        break;
        }

        if (enqueued > 0)
                ring->watchdog_time = ticks;

        return (err);
}
static void
mlx4_en_tx_que(void *context, int pending)
{
        struct mlx4_en_tx_ring *ring;
        struct mlx4_en_priv *priv;
        struct net_device *dev;
        struct mlx4_en_cq *cq;
        int tx_ind;

        cq = context;
        dev = cq->dev;
        priv = dev->if_softc;
        tx_ind = cq->ring;
        ring = priv->tx_ring[tx_ind];

        if (priv->port_up != 0 &&
            (dev->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                mlx4_en_xmit_poll(priv, tx_ind);
                spin_lock(&ring->tx_lock);
                if (!drbr_empty(dev, ring->br))
                        mlx4_en_transmit_locked(dev, tx_ind, NULL);
                spin_unlock(&ring->tx_lock);
        }
}
int
mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_tx_ring *ring;
        struct mlx4_en_cq *cq;
        u32 i;
        int err = 0;

        if (priv->port_up == 0) {
                m_freem(m);
                return (ENETDOWN);
        }

        /* Compute which queue to use */
        if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
                i = (m->m_pkthdr.flowid % 128) % priv->tx_ring_num;
        } else {
                i = mlx4_en_select_queue(dev, m);
        }

        ring = priv->tx_ring[i];
        if (spin_trylock(&ring->tx_lock)) {
                err = mlx4_en_transmit_locked(dev, i, m);
                spin_unlock(&ring->tx_lock);
                /* Poll CQ here */
                mlx4_en_xmit_poll(priv, i);
        } else {
                err = drbr_enqueue(dev, ring->br, m);
                cq = priv->tx_cq[i];
                taskqueue_enqueue(cq->tq, &cq->cq_task);
        }

        return (err);
}
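
/*
 * The flush below empties each per-ring buf_ring under tx_lock and
 * then calls if_qflush() so the interface-level send queue is cleared
 * as well.
 */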
/*
 * Flush ring buffers.
 */
void
mlx4_en_qflush(struct ifnet *dev)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_tx_ring *ring;
        struct mbuf *m;

        if (priv->port_up == 0)
                return;

        for (int i = 0; i < priv->tx_ring_num; i++) {
                ring = priv->tx_ring[i];
                spin_lock(&ring->tx_lock);
                while ((m = buf_ring_dequeue_sc(ring->br)) != NULL)
                        m_freem(m);
                spin_unlock(&ring->tx_lock);
        }

        if_qflush(dev);
}