/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include "mlx4_en.h"

enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
	MIN_PKT_LEN = 17,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, uint, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
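
/*
 * Note on inlining: the idea is that frames no larger than inline_thold
 * bytes are copied directly into the WQE (see is_inline() and
 * build_inline_wqe() below) rather than DMA-mapped, trading a small memcpy
 * for the map/unmap overhead. The 104-byte default mirrors the MAX_INLINE
 * comment above (128 - 16 - 4 - 4), i.e. one TXBB minus control-segment
 * and inline-segment overhead.
 */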

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, u32 size,
			   u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
	ring->inline_thold = min(inline_thold, MAX_INLINE);
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	/* Allocate the buf ring */
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
				  M_WAITOK, &ring->tx_lock.m);
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating drbr buffer ring\n");

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc_node(tmp, node);
	if (!ring->tx_info) {
		ring->tx_info = vmalloc(tmp);
		if (!ring->tx_info) {

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
	if (!ring->bounce_buf) {
		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
		if (!ring->bounce_buf) {

	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err)
		en_err(priv, "Failed allocating hwq resources\n");

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err)
		en_err(priv, "Failed to map TX buffer\n");

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
	if (err)
		en_err(priv, "failed reserving qp for TX ring\n");

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err)
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);

	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;
	ring->queue_index = queue_idx;
	if (queue_idx < priv->num_tx_rings_p_up)
		CPU_SET(queue_idx, &ring->affinity_mask);

	/* rollback on failure: release resources in reverse order of allocation */
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	vfree(ring->tx_info);
	buf_ring_free(ring->br, M_DEVBUF);

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring = *pring;

	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	buf_ring_free(ring->br, M_DEVBUF);
	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	vfree(ring->tx_info);
	mtx_destroy(&ring->tx_lock.m);
	mtx_destroy(&ring->comp_lock.m);
	kfree(ring);
	*pring = NULL;
}
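
/*
 * Activation programs the ring's send QP context (size, stride, CQ number,
 * user priority) and moves the QP from RESET towards the ready-to-send
 * state through mlx4_qp_to_ready(). The doorbell value written on transmit
 * is the QP number shifted into the doorbell register format (qpn << 8).
 */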

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, user_prio, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
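
/*
 * WQE "stamping": once a descriptor has completed, its TXBBs are overwritten
 * with a known pattern whose top bit encodes the current software ownership
 * round, so a producer that has wrapped around never mistakes stale
 * descriptors for ones it still owns. The wrap branch below flips the stamp
 * polarity when the walk crosses the end of the ring buffer.
 */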

static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
			      struct mlx4_en_tx_ring *ring,
			      int index, u8 owner)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	void *end = ring->buf + ring->buf_size;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
	int i;

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end))
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}
	else {
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *)ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}
	}
}
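
/*
 * Completion/free path: unmap every data segment that was DMA-mapped for
 * this descriptor, handling the case where the segment list wraps past the
 * end of the descriptor area, then hand a copy of the frame to BPF (if a
 * listener is attached) and release the mbuf. Returns the number of TXBBs
 * the descriptor occupied so the consumer index can be advanced.
 */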

static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner, u64 timestamp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct mbuf *mb = tx_info->mb;
	void *end = ring->buf + ring->buf_size;
	int frags = tx_info->nr_segs;
	int i;

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (tx_info->linear) {
			dma_unmap_single(priv->ddev,
				(dma_addr_t) be64_to_cpu(data->addr),
				 be32_to_cpu(data->byte_count),
				 PCI_DMA_TODEVICE);
			++data;
		}

		for (i = 0; i < frags; i++) {
			pci_unmap_single(mdev->pdev,
				(dma_addr_t) be64_to_cpu(data[i].addr),
				data[i].byte_count, PCI_DMA_TODEVICE);
		}
	} else {
		if ((void *) data >= end) {
			data = ring->buf + ((void *)data - end);
		}

		if (tx_info->linear) {
			dma_unmap_single(priv->ddev,
				(dma_addr_t) be64_to_cpu(data->addr),
				 be32_to_cpu(data->byte_count),
				 PCI_DMA_TODEVICE);
			++data;
		}

		for (i = 0; i < frags; i++) {
			/* Check for wraparound before unmapping */
			if ((void *) data >= end)
				data = ring->buf;
			pci_unmap_single(mdev->pdev,
				(dma_addr_t) be64_to_cpu(data->addr),
				data->byte_count, PCI_DMA_TODEVICE);
			++data;
		}
	}

	/* Send a copy of the frame to the BPF listener */
	if (priv->dev && priv->dev->if_bpf)
		ETHER_BPF_MTAP(priv->dev, mb);
	m_freem(mb);
	return tx_info->nr_txbb;
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
					ring->cons & ring->size_mask,
					!!(ring->cons & ring->size), 0);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}
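
/*
 * TX completion processing. CQE ownership follows the usual mlx4
 * convention: a CQE is valid when its owner bit matches the polarity of
 * cons_index relative to the CQ size, hence the XNOR test below. A single
 * reported completion may cover several descriptors, so the inner do/while
 * frees and stamps WQEs until the ring index catches up with the index
 * reported in the CQE.
 */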

static int mlx4_en_process_tx_cq(struct net_device *dev,
				 struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	u32 bytes = 0;
	int factor = priv->cqe_factor;
	u64 timestamp = 0;

	index = cons_index & size_mask;
	cqe = &buf[(index << factor) + factor];
	ring_index = ring->cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome: 0x%x syndrome: 0x%x\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
					ring->size), timestamp);
			mlx4_en_stamp_wqe(priv, ring, stamp_index,
					  !!((ring->cons + txbbs_stamp) &
					  ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[(index << factor) + factor];
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if it was stopped and ring is not full */
	if (unlikely(ring->blocked) &&
	    (ring->prod - ring->cons) <= ring->full_size) {
		ring->blocked = 0;
		if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
			atomic_clear_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
		priv->port_stats.wake_queue++;
	}
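
/*
 * The interrupt handler only trylocks the completion lock: if the timer
 * callback or the transmit path is already reaping completions there is
 * nothing to gain from spinning here, and the re-armed timer guarantees the
 * queue is polled again shortly.
 */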

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock(&ring->comp_lock);
}
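
/*
 * When a descriptor would wrap past the end of the ring it is first built
 * in ring->bounce_buf and then copied back in two chunks: the tail that
 * belongs at the start of the ring, followed by the head at the original
 * index. The copy runs backwards, 32 bits at a time, with a write barrier
 * at each TXBB boundary, and skips the first dword so the caller can write
 * the ownership/opcode word last.
 */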

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						       struct mlx4_en_tx_ring *ring,
						       u32 index,
						       unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock(&ring->comp_lock)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock(&ring->comp_lock);
		}
}

static int is_inline(struct mbuf *mb, int thold)
{
	if (thold && mb->m_pkthdr.len <= thold &&
	    (mb->m_pkthdr.csum_flags & CSUM_TSO) == 0)
		return (1);
	return (0);
}

static int inline_size(struct mbuf *mb)
{
	int len;

	len = mb->m_pkthdr.len;
	if (len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}
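
/*
 * For TSO the hardware needs the exact length of the headers that are
 * replicated in every segment. get_head_size() walks Ethernet (with an
 * optional VLAN tag), then IPv4 or IPv6, then TCP, and returns the total
 * header length, or 0 if the headers are not contiguous in the first mbuf
 * or the packet is not TCP.
 */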

static int get_head_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int ip_hlen, tcp_hlen;
	int eth_hdr_len;
	uint16_t eth_type;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}
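
/*
 * get_real_size() computes the un-aligned WQE size: one control segment,
 * one data segment per non-empty mbuf in the chain, and, for TSO, the LSO
 * header rounded up to a data-segment boundary. For small non-TSO frames
 * the inline layout from inline_size() is used instead.
 */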

static int get_real_size(struct mbuf *mb, struct net_device *dev, int *p_n_segs,
			 int *lso_header_size, int inl)
{
	struct mbuf *m;
	int nr_segs = 0;

	for (m = mb; m != NULL; m = m->m_next)
		if (m->m_len)
			nr_segs++;

	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		*lso_header_size = get_head_size(mb);
		if (*lso_header_size) {
			if (mb->m_len == *lso_header_size)
				nr_segs--;
			*p_n_segs = nr_segs;
			return CTRL_SIZE + nr_segs * DS_SIZE +
			    ALIGN(*lso_header_size + 4, DS_SIZE);
		}
	} else
		*lso_header_size = 0;

	*p_n_segs = nr_segs;
	if (inl)
		return inline_size(mb);
	return (CTRL_SIZE + nr_segs * DS_SIZE);
}

static struct mbuf *mb_copy(struct mbuf *mb, int *offp, char *data, int len)
{
	int off = *offp;
	int bytes;

	while (len) {
		bytes = min(mb->m_len - off, len);
		memcpy(data, mb->m_data + off, bytes);
		len -= bytes;
		data += bytes;
		off += bytes;
		if (off == mb->m_len) {
			off = 0;
			mb = mb->m_next;
		}
	}
	*offp = off;
	return (mb);
}

static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct mbuf *mb,
			     int real_size, u16 *vlan_tag, int tx_ind)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
	int len;
	int off;

	off = 0;
	len = mb->m_pkthdr.len;
	if (len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 |
		    (max_t(typeof(len), len, MIN_PKT_LEN)));
		mb_copy(mb, &off, (void *)(inl + 1), len);
		if (len < MIN_PKT_LEN)
			memset(((void *)(inl + 1)) + len, 0,
			    MIN_PKT_LEN - len);
	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		mb = mb_copy(mb, &off, (void *)(inl + 1), spc);
		inl = (void *) (inl + 1) + spc;
		mb_copy(mb, &off, (void *)(inl + 1), len - spc);
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (len - spc));
	}

	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

static unsigned long hashrandom;

static void hashrandom_init(void *arg)
{
	hashrandom = random();
}
SYSINIT(hashrandom_init, SI_SUB_KLD, SI_ORDER_SECOND, &hashrandom_init, NULL);
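
/*
 * Queue selection: the RSS/flow id supplied with the mbuf is used when
 * present, otherwise a software hash over the L3/L4 headers picks a ring
 * within the traffic-class group; the VLAN priority bits, if any, select
 * the group itself.
 */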

u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u32 rings_p_up = priv->num_tx_rings_p_up;
	u32 up = 0;
	u32 queue_index;

#if (MLX4_EN_NUM_UP > 1)
	/* Obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		u32 vlan_tag = mb->m_pkthdr.ether_vtag;
		up = (vlan_tag >> 13) % MLX4_EN_NUM_UP;
	}
#endif

	/* check if flowid is set */
	if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE)
		queue_index = mb->m_pkthdr.flowid;
	else
		queue_index = mlx4_en_hashmbuf(MLX4_F_HASHL3 | MLX4_F_HASHL4, mb, hashrandom);

	return ((queue_index % rings_p_up) + (up * rings_p_up));
}

static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, src, bytecnt / 8);
}

static u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return (mac);
}
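
/*
 * Hard transmit path, roughly: size the WQE (defragmenting the mbuf chain
 * when it would need too many TXBBs), stop the queue when fewer than
 * full_size TXBBs remain, build the control/LSO/data segments (or an inline
 * WQE for small frames), and finally publish the descriptor by writing the
 * ownership bit and ringing the doorbell (the BlueFlame register for small
 * descriptors, the regular UAR doorbell otherwise).
 */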

static int mlx4_en_xmit(struct net_device *dev, int tx_ind, struct mbuf **mbp)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *mb = *mbp;
	struct mbuf *m;
	u32 index, bf_index, ring_size;
	__be32 op_own;
	u16 vlan_tag = 0;
	dma_addr_t dma;
	int nr_txbb;
	int nr_segs;
	int desc_size;
	int real_size;
	int lso_header_size;
	int inl;
	int i;
	bool bounce = false;

	ring = priv->tx_ring[tx_ind];
	ring_size = ring->size;
	inl = is_inline(mb, ring->inline_thold);

	real_size = get_real_size(mb, dev, &nr_segs, &lso_header_size, inl);
	if (unlikely(!real_size))

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		mb = m_defrag(*mbp, M_NOWAIT);
		en_warn(priv, "Oversized header or SG list\n");

	/* Obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		vlan_tag = mb->m_pkthdr.ether_vtag;
	}

	/* Check available TXBBs and 2K spare for prefetch
	 * Even if netif_tx_stop_queue() will be called
	 * driver will send current packet to ensure
	 * that at least one completion will be issued after
	 * this packet is transmitted
	 */
	if (unlikely((int)(ring->prod - ring->cons) > ring->full_size)) {
		/* every full Tx ring stops queue */
		if (ring->blocked == 0)
			atomic_add_int(&priv->blocked, 1);
		/* Set HW-queue-is-full flag */
		atomic_set_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;
		ring->queue_stopped++;

		/* Use interrupts to find out when queue opened */
		cq = priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return (EBUSY);
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring_size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
	}

	/* Save mb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->mb = mb;
	tx_info->nr_txbb = nr_txbb;
	tx_info->nr_segs = nr_segs;

	if (lso_header_size) {
		memcpy(tx_desc->lso.header, mb->m_data, lso_header_size);
		data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
			DS_SIZE));
		/* lso header is part of m_data.
		 * need to omit when mapping DMA */
		mb->m_data += lso_header_size;
		mb->m_len -= lso_header_size;
	} else
		data = &tx_desc->data;

	/* valid only for non-inline segments */
	tx_info->data_offset = (void *)data - (void *)tx_desc;

	for (i = 0, m = mb; i < nr_segs; i++, m = m->m_next) {
		dma = pci_map_single(mdev->dev->pdev, m->m_data,
				     m->m_len, PCI_DMA_TODEVICE);
		data->addr = cpu_to_be64(dma);
		data->lkey = cpu_to_be32(mdev->mr.key);
		wmb();
		data->byte_count = cpu_to_be32(m->m_len);
		data++;
	}

	if (lso_header_size) {
		mb->m_data -= lso_header_size;
		mb->m_len += lso_header_size;
	}

	/* Prepare ctrl segment apart from opcode+ownership, which depends on
	 * whether LSO is used */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		!!vlan_tag;
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO |
	    CSUM_TCP | CSUM_UDP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) {
		if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
		if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
		    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM);
		priv->port_stats.tx_chksum_offload++;
	}

	if (unlikely(priv->validate_loopback)) {
		/* Copy dst mac address to wqe */
		struct ether_header *ethh;
		u64 mac;
		u32 mac_l, mac_h;

		ethh = mtod(mb, struct ether_header *);
		mac = mlx4_en_mac_to_u64(ethh->ether_dhost);
		mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
		mac_l = (u32) (mac & 0xffffffff);
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
		tx_desc->ctrl.imm = cpu_to_be32(mac_l);
	}

	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		int segsz;

		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring_size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			mb->m_pkthdr.tso_segsz << 16 | lso_header_size);

		priv->port_stats.tso_packets++;
		segsz = mb->m_pkthdr.tso_segsz;
		i = ((mb->m_pkthdr.len - lso_header_size + segsz - 1) / segsz);
		tx_info->nr_bytes = mb->m_pkthdr.len + (i - 1) * lso_header_size;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring_size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		tx_info->nr_bytes = max(mb->m_pkthdr.len,
				(unsigned int)ETHER_MIN_LEN - ETHER_CRC_LEN);
	}

	ring->bytes += tx_info->nr_bytes;
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);

	if (inl)
		build_inline_wqe(tx_desc, mb, real_size, &vlan_tag, tx_ind);

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (unlikely(bounce))
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
		op_own |= htonl((bf_index & 0xffff) << 8);
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;

		wmb();

		mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
			     desc_size);

		wmb();

		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;
		wmb();
		writel(cpu_to_be32(ring->doorbell_qpn), ring->bf.uar->map + MLX4_SEND_DOORBELL);
	}
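
/*
 * The if_transmit path below uses a per-ring buf_ring (drbr): packets are
 * enqueued first and then drained while the TX lock is held, so a thread
 * that loses the trylock in mlx4_en_transmit() can leave its mbuf behind
 * and let the lock holder (or the taskqueue) push it out.
 */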

static int
mlx4_en_transmit_locked(struct ifnet *dev, int tx_ind, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mbuf *next;
	int enqueued, err = 0;

	ring = priv->tx_ring[tx_ind];
	if ((dev->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || priv->port_up == 0) {
		if (m != NULL)
			err = drbr_enqueue(dev, ring->br, m);
		return (err);
	}

	enqueued = 0;
	if (m != NULL)
		if ((err = drbr_enqueue(dev, ring->br, m)) != 0)
			return (err);
	/* Process the queue */
	while ((next = drbr_peek(dev, ring->br)) != NULL) {
		if ((err = mlx4_en_xmit(dev, tx_ind, &next)) != 0) {
			if (next == NULL)
				drbr_advance(dev, ring->br);
			else
				drbr_putback(dev, ring->br, next);
			break;
		}
		drbr_advance(dev, ring->br);
		enqueued++;
		if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (enqueued > 0)
		ring->watchdog_time = ticks;

	return (err);
}

static void
mlx4_en_tx_que(void *context, int pending)
{
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct mlx4_en_cq *cq;
	int tx_ind;

	cq = context;
	dev = cq->dev;
	priv = dev->if_softc;
	tx_ind = cq->ring;
	ring = priv->tx_ring[tx_ind];
	if (dev->if_drv_flags & IFF_DRV_RUNNING) {
		mlx4_en_xmit_poll(priv, tx_ind);
		spin_lock(&ring->tx_lock);
		if (!drbr_empty(dev, ring->br))
			mlx4_en_transmit_locked(dev, tx_ind, NULL);
		spin_unlock(&ring->tx_lock);
	}
}

int
mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	int i, err = 0;

	/* Compute which queue to use */
	i = mlx4_en_select_queue(dev, m);

	ring = priv->tx_ring[i];

	if (spin_trylock(&ring->tx_lock)) {
		err = mlx4_en_transmit_locked(dev, i, m);
		spin_unlock(&ring->tx_lock);
		mlx4_en_xmit_poll(priv, i);
	} else {
		err = drbr_enqueue(dev, ring->br, m);
		cq = priv->tx_cq[i];
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	}

	return (err);
}

/*
 * Flush ring buffers.
 */
void
mlx4_en_qflush(struct ifnet *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mbuf *m;

	for (int i = 0; i < priv->tx_ring_num; i++) {
		ring = priv->tx_ring[i];
		spin_lock(&ring->tx_lock);
		while ((m = buf_ring_dequeue_sc(ring->br)) != NULL)
			m_freem(m);
		spin_unlock(&ring->tx_lock);
	}
}