/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <dev/mlx4/cq.h>
#include <linux/slab.h>
#include <dev/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <dev/mlx4/driver.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include "en.h"
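/*
 * Initialize the receive descriptor at "index". Only the first data
 * segment is used; the loop below pads any remaining segments in the
 * stride with a null address/size and the special PAD memory key so
 * the hardware ignores them.
 */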
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring,
    int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (ring->stride * index));
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	rx_desc->data[0].byte_count = cpu_to_be32(priv->rx_mb_size - MLX4_NET_IP_ALIGN);
	rx_desc->data[0].lkey = cpu_to_be32(priv->mdev->mr.key);

	/*
	 * If the number of used fragments does not fill up the ring
	 * stride, remaining (unused) fragments must be padded with
	 * null address/size and a special memory key:
	 */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = 1; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
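/*
 * Allocate and DMA-load a replacement mbuf for one ring slot. A
 * preloaded "spare" mbuf is kept around at all times: when a fresh
 * allocation or DMA load fails, the spare is swapped into the slot
 * instead, so a receive slot is never left without a buffer under
 * memory pressure.
 */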
static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
    __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct mbuf *mb;
	int nsegs;
	int err;

	/* try to allocate a new spare mbuf */
	if (unlikely(ring->spare.mbuf == NULL)) {
		mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
		if (unlikely(mb == NULL))
			return (-ENOMEM);
		/* setup correct length */
		mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

		/* make sure IP header gets aligned */
		m_adj(mb, MLX4_NET_IP_ALIGN);

		/* load spare mbuf into BUSDMA */
		err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		if (unlikely(err != 0)) {
			m_freem(mb);
			return (err);
		}

		/* store spare info */
		ring->spare.mbuf = mb;
		ring->spare.paddr_be = cpu_to_be64(segs[0].ds_addr);

		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* synchronize and unload the current mbuf, if any */
	if (likely(mb_list->mbuf != NULL)) {
		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
	}

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
	if (unlikely(mb == NULL))
		goto use_spare;

	/* setup correct length */
	mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

	/* make sure IP header gets aligned */
	m_adj(mb, MLX4_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely(err != 0)) {
		m_freem(mb);
		goto use_spare;
	}

	*pdma = cpu_to_be64(segs[0].ds_addr);
	mb_list->mbuf = mb;

	bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
	return (0);

use_spare:
	/* swap DMA maps */
	map = mb_list->dma_map;
	mb_list->dma_map = ring->spare.dma_map;
	ring->spare.dma_map = map;

	/* swap MBUFs */
	mb_list->mbuf = ring->spare.mbuf;
	ring->spare.mbuf = NULL;

	/* store physical address */
	*pdma = ring->spare.paddr_be;
	return (0);
}
static void
mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dmamap_t map = mb_list->dma_map;
	bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(ring->dma_tag, map);
	m_freem(mb_list->mbuf);
	mb_list->mbuf = NULL;	/* safety clearing */
}
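/*
 * Post a fresh buffer into the descriptor at "index"; an allocation
 * failure is counted in the port statistics rather than leaving a
 * half-initialized slot behind.
 */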
static int
mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (index * ring->stride));
	struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index;

	mb_list->mbuf = NULL;

	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list)) {
		priv->port_stats.rx_alloc_failed++;
		return (-ENOMEM);
	}
	return (0);
}
static inline void
mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
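/*
 * Fill every RX ring up to the profile size. If an allocation fails
 * part way through, all rings are shrunk to the largest power of two
 * that every ring managed to fill, so the rings stay equally sized.
 */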
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind, buf_ind, new_size, err;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];
			err = mlx4_en_prepare_rx_desc(priv, ring,
			    ring->actual_size);
			if (err) {
				if (ring->actual_size == 0) {
					en_err(priv, "Failed to allocate "
					    "enough rx buffers\n");
					return (-ENOMEM);
				}
				new_size =
				    rounddown_pow_of_two(ring->actual_size);
				en_warn(priv, "Only %d buffers allocated "
				    "reducing ring size to %d\n",
				    ring->actual_size, new_size);
				goto reduce_rings;
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return (0);

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_buf(ring,
			    ring->mbuf + ring->actual_size);
		}
	}
	return (0);
}
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	    ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_buf(ring, ring->mbuf + index);
		++ring->cons;
	}
}
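/*
 * Choose the per-port RX ring count: bounded by the event queues
 * available to the port, clamped under low-memory profiles, and
 * rounded down to a power of two as the RSS indirection logic
 * expects.
 */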
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
		    min_t(int,
			mlx4_get_eqs_per_port(mdev->dev, i),
			DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
		    num_of_eqs;
		mdev->profile.prof[i].rx_ring_num =
		    rounddown_pow_of_two(num_rx_rings);
	}
}
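/*
 * Round the effective frame size up to the FreeBSD mbuf cluster size
 * that can hold it. For example (assuming MLX4_NET_IP_ALIGN == 2): a
 * 1500-byte MTU gives 1500 + 14 (ether) + 4 (vlan) + 4 (fcs) + 2 =
 * 1524 bytes, which fits a regular 2k cluster (MCLBYTES); a 9000-byte
 * MTU gives 9024 bytes, which needs a 9k jumbo cluster (MJUM9BYTES).
 */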
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN +
	    MLX4_NET_IP_ALIGN;

	if (eff_mtu > MJUM16BYTES) {
		en_err(priv, "MTU(%u) is too big\n", (unsigned)dev->if_mtu);
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUM9BYTES) {
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUMPAGESIZE) {
		eff_mtu = MJUM9BYTES;
	} else if (eff_mtu > MCLBYTES) {
		eff_mtu = MJUMPAGESIZE;
	} else {
		eff_mtu = MCLBYTES;
	}

	priv->rx_mb_size = eff_mtu;

	en_dbg(DRV, priv, "Effective RX MTU: %d bytes\n", eff_mtu);
}
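/*
 * Allocate one RX ring: the ring structure itself, a busdma tag and
 * one DMA map per slot (plus the spare), and the hardware work queue
 * that backs the descriptors.
 */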
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring **pring,
    u32 size, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err;
	int tmp;
	uint32_t x;

	ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL);
	if (ring == NULL) {
		en_err(priv, "Failed to allocate RX ring structure\n");
		return (-ENOMEM);
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM16BYTES,		/* maxsize */
	    1,				/* nsegments */
	    MJUM16BYTES,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &ring->dma_tag))) {
		en_err(priv, "Failed to create DMA tag\n");
		goto err_ring;
	}

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * sizeof(struct mlx4_en_rx_mbuf);

	ring->mbuf = kzalloc(tmp, GFP_KERNEL);
	if (ring->mbuf == NULL) {
		err = -ENOMEM;
		goto err_dma_tag;
	}

	err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map);
	if (err != 0)
		goto err_info;

	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(ring->dma_tag,
				    ring->mbuf[x].dma_map);
			goto err_map;
		}
	}
	en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n",
	    ring->mbuf, tmp);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
	    ring->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_dma_map;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;
	*pring = ring;
	return (0);

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag,
		    ring->mbuf[x].dma_map);
err_map:
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
err_info:
	kfree(ring->mbuf);
err_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
err_ring:
	kfree(ring);
	return (err);
}
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
		ring->rx_mb_size = priv->rx_mb_size;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Configure lro mngr */
		if (priv->dev->if_capenable & IFCAP_LRO) {
			if (tcp_lro_init(&ring->lro))
				priv->dev->if_capenable &= ~IFCAP_LRO;
			else
				ring->lro.ifp = priv->dev;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}
	return (0);

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		ring = priv->rx_ring[ring_ind];
		if (ring->stride <= TXBB_SIZE)
			ring->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return (err);
}
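/*
 * Tear down everything mlx4_en_create_rx_ring() allocated: the
 * hardware queue resources, per-slot DMA maps, the spare mbuf and its
 * map, and the ring structure itself.
 */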
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring **pring,
    u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	uint32_t x;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
	/* free spare mbuf, if any */
	if (ring->spare.mbuf != NULL) {
		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map);
		m_freem(ring->spare.mbuf);
	}
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
	kfree(ring->mbuf);
	bus_dma_tag_destroy(ring->dma_tag);
	kfree(ring);
	*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv, ring);
#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring)
{
	tcp_lro_free(&ring->lro);

	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}
static void validate_loopback(struct mlx4_en_priv *priv, struct mbuf *mb)
{
	int i;
	int offset = ETHER_HDR_LEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(mb->m_data + offset) != (unsigned char)(i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	m_freem(mb);
}
static inline int invalid_cqe(struct mlx4_en_priv *priv,
    struct mlx4_cqe *cqe)
{
	/* Drop packet on bad receive or bad checksum */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
	    MLX4_CQE_OPCODE_ERROR)) {
		en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
		    ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
		    ((struct mlx4_err_cqe *)cqe)->syndrome);
		return (1);
	}
	if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
		en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
		return (1);
	}

	return (0);
}
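/*
 * Detach the received mbuf from the ring slot while atomically
 * installing a replacement; returns NULL and leaves the old mbuf in
 * place (the packet is dropped) when no replacement buffer could be
 * installed.
 */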
static struct mbuf *
mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
    struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_mbuf *mb_list,
    int length)
{
	struct mbuf *mb = mb_list->mbuf;

	/* collect used fragment while atomically replacing it */
	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list))
		return (NULL);

	/* range check hardware computed value */
	if (unlikely(length > mb->m_len))
		length = mb->m_len;

	/* update total packet length in packet header */
	mb->m_len = mb->m_pkthdr.len = length;
	return (mb);
}
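/*
 * Map the IP/TCP/UDP status bits of a CQE to the corresponding
 * FreeBSD M_HASHTYPE_* value, so the stack knows what the Toeplitz
 * hash in the completion was computed over.
 */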
static inline int
mlx4_en_rss_hash(__be16 status, int udp_rss)
{
	enum {
		status_all = cpu_to_be16(
			MLX4_CQE_STATUS_IPV4 |
			MLX4_CQE_STATUS_IPV4F |
			MLX4_CQE_STATUS_IPV6 |
			MLX4_CQE_STATUS_TCP |
			MLX4_CQE_STATUS_UDP),
		status_ipv4_tcp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV4 |
			MLX4_CQE_STATUS_TCP),
		status_ipv6_tcp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV6 |
			MLX4_CQE_STATUS_TCP),
		status_ipv4_udp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV4 |
			MLX4_CQE_STATUS_UDP),
		status_ipv6_udp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV6 |
			MLX4_CQE_STATUS_UDP),
		status_ipv4 = cpu_to_be16(MLX4_CQE_STATUS_IPV4),
		status_ipv6 = cpu_to_be16(MLX4_CQE_STATUS_IPV6)
	};

	status &= status_all;
	switch (status) {
	case status_ipv4_tcp:
		return (M_HASHTYPE_RSS_TCP_IPV4);
	case status_ipv6_tcp:
		return (M_HASHTYPE_RSS_TCP_IPV6);
	case status_ipv4_udp:
		return (udp_rss ? M_HASHTYPE_RSS_UDP_IPV4
		    : M_HASHTYPE_RSS_IPV4);
	case status_ipv6_udp:
		return (udp_rss ? M_HASHTYPE_RSS_UDP_IPV6
		    : M_HASHTYPE_RSS_IPV6);
	default:
		if (status & status_ipv4)
			return (M_HASHTYPE_RSS_IPV4);
		if (status & status_ipv6)
			return (M_HASHTYPE_RSS_IPV6);
		return (M_HASHTYPE_OPAQUE_HASH);
	}
}
/*
 * For CPU architectures with a 64-byte cache line, performance is
 * better when the CQE size is 64 bytes. To enlarge the CQE size from
 * 32 to 64 bytes, 32 bytes of garbage (i.e. 0xcccccccc) are prepended
 * to each CQE; the real data sits in the second 32 bytes. The
 * following calculation ensures that when factor == 1 we are aligned
 * to 64 bytes and pick up the real CQE data.
 */
#define CQE_FACTOR_INDEX(index, factor) (((index) << (factor)) + (factor))
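/*
 * Worked example: with 32-byte CQEs, factor == 0 and
 * CQE_FACTOR_INDEX(i, 0) == i. With 64-byte CQEs, factor == 1 and
 * CQE_FACTOR_INDEX(i, 1) == 2 * i + 1, i.e. the second 32-byte half
 * of the i-th 64-byte entry, which is where the real CQE lives.
 */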
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_mbuf *mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct mbuf *mb;
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_cqe *buf = cq->buf;
	int index;
	unsigned int length;
	int polled = 0;
	u32 cons_index = mcq->cons_index;
	u32 size_mask = ring->size_mask;
	int size = cq->size;
	int factor = priv->cqe_factor;
	const int udp_rss = priv->mdev->profile.udp_rss;

	if (unlikely(!priv->port_up))
		return (0);

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deducted from the CQE index instead of
	 * reading 'cqe->index' */
	index = cons_index & size_mask;
	cqe = &buf[CQE_FACTOR_INDEX(index, factor)];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
	    cons_index & size)) {
		mb_list = ring->mbuf + index;
		rx_desc = (struct mlx4_en_rx_desc *)
		    (ring->buf + (index << ring->log_stride));

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		if (invalid_cqe(priv, cqe)) {
			goto next;
		}
		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);
		if (unlikely(mb == NULL)) {
			ring->errors++;
			goto next;
		}

		ring->bytes += length;
		ring->packets++;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);
			goto next;
		}

		/* forward Toeplitz compatible hash value */
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->immed_rss_invalid);
		M_HASHTYPE_SET(mb, mlx4_en_rss_hash(cqe->status, udp_rss));
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_CVLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;
		}
		if (likely(dev->if_capenable &
		    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags =
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);
			/* This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - TCP/IP (v4)
			 * - without IP options
			 * - not an IP fragment
			 */
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)
					goto next;
			}

			/* LRO not possible, complete processing here */
			INC_PERF_COUNTER(priv->pstats.lro_misses);
		} else {
			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;
		}

		/* Push it up the stack */
		dev->if_input(dev, mb);

next:
		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[CQE_FACTOR_INDEX(index, factor)];
		if (++polled == budget)
			goto out;
	}
	/* Flush all pending IP reassembly sessions */
out:
	tcp_lro_flush_all(&ring->lro);

	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = mcq->cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return (polled);
}
/* Rx CQ polling - called from the IRQ handler and the taskqueue */
static int mlx4_en_poll_rx_cq(struct mlx4_en_cq *cq, int budget)
{
	struct net_device *dev = cq->dev;
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);
	cq->tot_rx += done;

	return (done);
}
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	int done;

	/* Poll once within the IRQ context, because there is no NAPI
	 * in FreeBSD; if the budget was exhausted, defer further
	 * polling to the taskqueue. */
	done = mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET);
	if (priv->port_up && (done == MLX4_EN_RX_BUDGET)) {
		cq->curr_poll_rx_cpu_id = curcpu;
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	} else {
		mlx4_en_arm_cq(priv, cq);
	}
}
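/*
 * Taskqueue continuation of the RX interrupt: bind to the CPU that
 * last serviced the interrupt and keep polling full budgets until the
 * CQ drains, then re-arm it.
 */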
void mlx4_en_rx_que(void *context, int pending)
{
	struct mlx4_en_cq *cq;
	struct thread *td;

	cq = context;
	td = curthread;

	thread_lock(td);
	sched_bind(td, cq->curr_poll_rx_cpu_id);
	thread_unlock(td);

	while (mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET)
	    == MLX4_EN_RX_BUDGET);
	mlx4_en_arm_cq(cq->dev->if_softc, cq);
}
/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
    struct mlx4_en_rx_ring *ring,
    enum mlx4_qp_state *state,
    struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		en_err(priv, "Failed to allocate qp context\n");
		return (-ENOMEM);
	}

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
	    qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return (err);
}
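/*
 * The drop QP is never attached to a completion queue; it only serves
 * as a destination for flow steering rules whose traffic should be
 * silently discarded.
 */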
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return (err);
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return (err);
	}

	return (0);
}
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}
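/* Fixed Toeplitz RSS key, stored as big-endian 32-bit words. */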
static const u32 *
mlx4_en_get_rss_key(struct mlx4_en_priv *priv __unused,
    u16 *keylen)
{
	static const u32 rsskey[10] = {
		cpu_to_be32(0xD181C62C),
		cpu_to_be32(0xF7F4DB5B),
		cpu_to_be32(0x1983A2FC),
		cpu_to_be32(0x943E1ADB),
		cpu_to_be32(0xD9389E6B),
		cpu_to_be32(0xD1039C2C),
		cpu_to_be32(0xA74499AD),
		cpu_to_be32(0x593D56D9),
		cpu_to_be32(0xF3253C06),
		cpu_to_be32(0x2ADC1FFC)
	};

	if (keylen != NULL)
		*keylen = sizeof(rsskey);
	return (rsskey);
}
u8 mlx4_en_get_rss_mask(struct mlx4_en_priv *priv)
{
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
	    MLX4_RSS_TCP_IPV6);

	if (priv->mdev->profile.udp_rss)
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
	return (rss_mask);
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	const u32 *key;
	int rss_rings;
	void *ptr;
	int i;
	int err = 0;
	int good_qps = 0;

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
	    priv->rx_ring_num,
	    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return (err);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, priv->rx_ring[i]->qpn,
		    priv->rx_ring[i],
		    &rss_map->state[i],
		    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
	    priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((u8 *)&context) + offsetof(struct mlx4_qp_context, pri_path) +
	    MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
	    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss)
		rss_context->base_qpn_udp = rss_context->default_qpn;
	rss_context->flags = mlx4_en_get_rss_mask(priv);
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	key = mlx4_en_get_rss_key(priv, NULL);
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = key[i];

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
	    &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return (0);

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
		    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return (err);
}
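/*
 * Undo mlx4_en_config_rss_steer(): reset and free the indirection QP
 * and every per-ring RSS QP, then release the reserved QPN range.
 */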
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
		    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}