/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/driver.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include "mlx4_en.h"	/* driver-private declarations */
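
/*
 * Initialize one RX descriptor: set the size and memory key of its
 * first scatter entry and pad any unused entries with the special
 * "pad" memory key so the HCA skips them.
 */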
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (ring->stride * index));
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	rx_desc->data[0].byte_count = cpu_to_be32(priv->rx_mb_size - MLX4_NET_IP_ALIGN);
	rx_desc->data[0].lkey = cpu_to_be32(priv->mdev->mr.key);

	/*
	 * If the number of used fragments does not fill up the ring
	 * stride, remaining (unused) fragments must be padded with
	 * null address/size and a special memory key:
	 */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = 1; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
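
/*
 * Allocate and DMA-map a receive mbuf for one ring entry. A pre-loaded
 * spare mbuf is kept per ring so that, when a fresh allocation or DMA
 * load fails, the spare can be installed instead and the ring slot is
 * never left empty.
 */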
static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
    __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct mbuf *mb;
	int nsegs;
	int err;

	/* try to allocate a new spare mbuf */
	if (unlikely(ring->spare.mbuf == NULL)) {
		mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
		if (unlikely(mb == NULL))
			return (-ENOMEM);
		/* setup correct length */
		mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

		/* make sure IP header gets aligned */
		m_adj(mb, MLX4_NET_IP_ALIGN);

		/* load spare mbuf into BUSDMA */
		err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		if (unlikely(err != 0)) {
			m_freem(mb);
			return (err);
		}

		/* store spare info */
		ring->spare.mbuf = mb;
		ring->spare.paddr_be = cpu_to_be64(segs[0].ds_addr);

		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* synchronize and unload the current mbuf, if any */
	if (likely(mb_list->mbuf != NULL)) {
		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
	}

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
	if (unlikely(mb == NULL))
		goto use_spare;

	/* setup correct length */
	mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

	/* make sure IP header gets aligned */
	m_adj(mb, MLX4_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely(err != 0)) {
		m_freem(mb);
		goto use_spare;
	}

	*pdma = cpu_to_be64(segs[0].ds_addr);
	mb_list->mbuf = mb;

	bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
	return (0);

use_spare:
	/* swap DMA maps */
	map = mb_list->dma_map;
	mb_list->dma_map = ring->spare.dma_map;
	ring->spare.dma_map = map;

	/* swap MBUFs */
	mb_list->mbuf = ring->spare.mbuf;
	ring->spare.mbuf = NULL;

	/* store physical address */
	*pdma = ring->spare.paddr_be;
	return (0);
}
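
/*
 * Unload the DMA mapping and free the mbuf attached to one ring entry.
 */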
static void
mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dmamap_t map = mb_list->dma_map;

	bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(ring->dma_tag, map);
	m_freem(mb_list->mbuf);
	mb_list->mbuf = NULL;	/* safety clearing */
}
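
/*
 * Post a new buffer for the descriptor at 'index': allocate and map an
 * mbuf and write its bus address into the descriptor's first scatter
 * entry.
 */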
static int
mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (index * ring->stride));
	struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index;

	mb_list->mbuf = NULL;

	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list)) {
		priv->port_stats.rx_alloc_failed++;
		return (-ENOMEM);
	}
	return (0);
}
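
/*
 * Publish the ring producer index to the hardware doorbell record.
 */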
static inline void
mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
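
/*
 * Populate every RX ring with receive buffers. If allocations run out
 * before the rings are full, shrink all rings to the largest
 * power-of-two count that could be populated and free the excess
 * buffers.
 */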
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind, buf_ind;
	int new_size = 0;
	int err;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			err = mlx4_en_prepare_rx_desc(priv, ring,
			    ring->actual_size);
			if (err) {
				if (ring->actual_size == 0) {
					en_err(priv, "Failed to allocate "
					    "enough rx buffers\n");
					return (-ENOMEM);
				}
				new_size = rounddown_pow_of_two(ring->actual_size);
				en_warn(priv, "Only %d buffers allocated "
				    "reducing ring size to %d\n",
				    ring->actual_size, new_size);
				goto reduce_rings;
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return (0);

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_buf(ring,
			    ring->mbuf + ring->actual_size);
		}
	}
	return (0);
}
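
/*
 * Drain a ring: free every posted buffer between the consumer and
 * producer indices.
 */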
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	    ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_buf(ring, ring->mbuf + index);
		++ring->cons;
	}
}
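
/*
 * Derive the receive buffer size from the interface MTU, rounding the
 * effective MTU up to the next mbuf cluster size the allocator can
 * provide.
 */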
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN +
	    MLX4_NET_IP_ALIGN;

	if (eff_mtu > MJUM16BYTES) {
		en_err(priv, "MTU(%d) is too big\n", (int)dev->if_mtu);
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUM9BYTES) {
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUMPAGESIZE) {
		eff_mtu = MJUM9BYTES;
	} else if (eff_mtu > MCLBYTES) {
		eff_mtu = MJUMPAGESIZE;
	} else {
		eff_mtu = MCLBYTES;
	}

	priv->rx_mb_size = eff_mtu;

	en_dbg(DRV, priv, "Effective RX MTU: %d bytes\n", eff_mtu);
}
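
/*
 * Allocate one RX ring: the ring structure itself, a busdma tag plus
 * per-entry DMA maps for the mbufs, and the hardware work-queue
 * resources backing the descriptors.
 */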
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring **pring,
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;

	ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL);
		en_err(priv, "Failed to allocate RX ring structure\n");

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM16BYTES,		/* maxsize */
	    MJUM16BYTES,		/* maxsegsize */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
		en_err(priv, "Failed to create DMA tag\n");

	ring->size_mask = size - 1;
	ring->stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * sizeof(struct mlx4_en_rx_mbuf);

	ring->mbuf = kzalloc(tmp, GFP_KERNEL);
	if (ring->mbuf == NULL) {

	err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map);

	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->mbuf[x].dma_map);
			bus_dmamap_destroy(ring->dma_tag,
			    ring->mbuf[x].dma_map);

	en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n",

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
	    ring->buf_size, 2 * PAGE_SIZE);

	err = mlx4_en_map_buffer(&ring->wqres.buf);
		en_err(priv, "Failed to map RX buffer\n");
	ring->buf = ring->wqres.buf.direct.buf;

	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	for (x = 0; x != size; x++) {
		bus_dmamap_destroy(ring->dma_tag,
		    ring->mbuf[x].dma_map);
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
	bus_dma_tag_destroy(ring->dma_tag);
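
/*
 * Bring all RX rings into an operational state: reset the ring
 * indices, initialize the descriptors, set up LRO, pre-fill the rings
 * with buffers and publish the producer index to the hardware.
 */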
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i, ring_ind;
	int err;
	int stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
		ring->rx_mb_size = priv->rx_mb_size;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Configure LRO manager */
		if (priv->dev->if_capenable & IFCAP_LRO) {
			if (tcp_lro_init(&ring->lro))
				priv->dev->if_capenable &= ~IFCAP_LRO;
			ring->lro.ifp = priv->dev;
		}
	}

	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}
	return (0);

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		ring = priv->rx_ring[ring_ind];
		if (ring->stride <= TXBB_SIZE)
			ring->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return (err);
}
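
/*
 * Release everything allocated by mlx4_en_create_rx_ring(): hardware
 * queue resources, per-entry DMA maps, the spare mbuf and the DMA tag.
 */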
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring **pring,
    u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	u32 x;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
	/* free spare mbuf, if any */
	if (ring->spare.mbuf != NULL) {
		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map);
		m_freem(ring->spare.mbuf);
	}
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
	bus_dma_tag_destroy(ring->dma_tag);
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv, ring);
#endif
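
/*
 * Quiesce one ring: release its LRO state and free all posted receive
 * buffers.
 */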
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring)
{
	tcp_lro_free(&ring->lro);
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}
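
/*
 * Verify a received loopback-test frame against the expected payload
 * pattern and record whether the loopback test passed.
 */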
static void validate_loopback(struct mlx4_en_priv *priv, struct mbuf *mb)
{
	int i;
	int offset = ETHER_HDR_LEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(mb->m_data + offset) != (unsigned char)(i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	m_freem(mb);
}
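
/*
 * Check a completion for an error status or a bad FCS; packets flagged
 * here are dropped by the caller.
 */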
static inline int invalid_cqe(struct mlx4_en_priv *priv,
    struct mlx4_cqe *cqe)
{
	/* Drop packet on bad receive or bad checksum */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
	    MLX4_CQE_OPCODE_ERROR)) {
		en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
		    ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
		    ((struct mlx4_err_cqe *)cqe)->syndrome);
		return (1);
	}
	if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
		en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
		return (1);
	}

	return (0);
}
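
/*
 * Detach the filled mbuf from its ring entry, post a replacement
 * buffer in its place and set the final packet length reported by
 * hardware.
 */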
static struct mbuf *
mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
    struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_mbuf *mb_list,
    int length)
{
	struct mbuf *mb;

	/* get the filled mbuf for this ring entry */
	mb = mb_list->mbuf;

	/* collect used fragment while atomically replacing it */
	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list))
		return (NULL);

	/* range check hardware computed value */
	if (unlikely(length > mb->m_len))
		length = mb->m_len;

	/* update total packet length in packet header */
	mb->m_len = mb->m_pkthdr.len = length;
	return (mb);
}

/*
 * For CPU architectures with a 64B cache line, performance is better when
 * the CQE size is 64B. To enlarge the CQE size from 32B to 64B, 32B of
 * garbage (i.e. 0xccccccc) were added at the beginning of each CQE (the
 * real data is in the remaining 32B). The following calculation ensures
 * that when factor==1 we are aligned to 64B and read the real CQE data.
 */
#define CQE_FACTOR_INDEX(index, factor) ((index << factor) + factor)
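
/*
 * Process completed receive CQEs for one ring: replace each used
 * buffer, validate the packet, fill in checksum/VLAN/RSS metadata and
 * hand the mbuf to LRO or directly to the stack, up to 'budget'
 * packets.
 */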
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_mbuf *mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_cqe *buf = cq->buf;
	struct lro_entry *queued;
	u32 cons_index = mcq->cons_index;
	u32 size_mask = ring->size_mask;
	int factor = priv->cqe_factor;

	/*
	 * We assume a 1:1 mapping between CQEs and Rx descriptors, so the Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index'.
	 */
	index = cons_index & size_mask;
	cqe = &buf[CQE_FACTOR_INDEX(index, factor)];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
	    cons_index & size)) {
		mb_list = ring->mbuf + index;
		rx_desc = (struct mlx4_en_rx_desc *)
		    (ring->buf + (index << ring->log_stride));

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */

		if (invalid_cqe(priv, cqe)) {

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);

		ring->bytes += length;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);

		/* forward Toeplitz compatible hash value */
		mb->m_pkthdr.flowid = be32_to_cpu(cqe->immed_rss_invalid);
		mb->m_flags |= M_FLOWID;
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;
		}
		if (likely(dev->if_capenable &
		    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags =
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);
			/*
			 * This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - without IP options
			 * - not an IP fragment
			 */
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)

			/* LRO not possible, complete processing here */
			INC_PERF_COUNTER(priv->pstats.lro_misses);

			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;

		/* Push it up the stack */
		dev->if_input(dev, mb);

		index = cons_index & size_mask;
		cqe = &buf[CQE_FACTOR_INDEX(index, factor)];
		if (++polled == budget)

	/* Flush all pending IP reassembly sessions */
	while ((queued = SLIST_FIRST(&ring->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&ring->lro.lro_active, next);
		tcp_lro_flush(&ring->lro, queued);
	}

	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mcq->cons_index = cons_index;

	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = mcq->cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);

/* Rx CQ polling - called from the interrupt handler and the taskqueue */
static int mlx4_en_poll_rx_cq(struct mlx4_en_cq *cq, int budget)
{
	struct net_device *dev = cq->dev;
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	return (done);
}
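
/*
 * RX completion interrupt handler: poll the CQ once in interrupt
 * context and defer further work to the taskqueue when a full budget
 * was consumed.
 */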
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	int done;

	/* Poll once within the IRQ context because there is no NAPI in FreeBSD */
	done = mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET);
	if (priv->port_up && (done == MLX4_EN_RX_BUDGET)) {
		cq->curr_poll_rx_cpu_id = curcpu;
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	} else {
		mlx4_en_arm_cq(priv, cq);
	}
}
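
/*
 * Taskqueue handler: bind to the CPU that scheduled the poll, keep
 * polling the RX CQ while full budgets are returned, then re-arm the
 * CQ.
 */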
void mlx4_en_rx_que(void *context, int pending)
{
	struct mlx4_en_cq *cq;
	struct thread *td;

	cq = context;
	td = curthread;

	thread_lock(td);
	sched_bind(td, cq->curr_poll_rx_cpu_id);
	thread_unlock(td);

	while (mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET)
	    == MLX4_EN_RX_BUDGET);
	mlx4_en_arm_cq(cq->dev->if_softc, cq);
}

/* RSS related functions */
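
/*
 * Allocate one RSS receive QP, attach it to its ring's CQ and doorbell
 * record, and bring it to the ready state.
 */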
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
    struct mlx4_en_rx_ring *ring,
    enum mlx4_qp_state *state,
    struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		en_err(priv, "Failed to allocate qp context\n");
		return -ENOMEM;
	}

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
	    qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}
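
/*
 * Reserve and allocate a QP that is not attached to any ring; traffic
 * steered to it is silently discarded.
 */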
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate RX QPs and configure them according to the RSS map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	void *ptr;
	int rss_rings;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
	    MLX4_RSS_TCP_IPV6);
	int i;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
	    0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
	    0x593D56D9, 0xF3253C06, 0x2ADC1FFC};

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
	    priv->rx_ring_num,
	    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, priv->rx_ring[i]->qpn,
		    priv->rx_ring[i],
		    &rss_map->state[i],
		    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
	    priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((u8 *)&context) + offsetof(struct mlx4_qp_context, pri_path) +
	    MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
	    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}
	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
	    &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
		    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
		    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}