/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/driver.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (ring->stride * index));
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	rx_desc->data[0].byte_count = cpu_to_be32(priv->rx_mb_size);
	rx_desc->data[0].lkey = cpu_to_be32(priv->mdev->mr.key);

	/*
	 * If the number of used fragments does not fill up the ring
	 * stride, remaining (unused) fragments must be padded with
	 * null address/size and a special memory key:
	 */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = 1; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
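
/*
 * DS_SIZE above is the size of a single scatter/gather entry, so
 * possible_frags is the number of data segments that fit in one descriptor
 * stride; every slot past data[0] is stamped as padding.
 */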

static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
    __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct mbuf *mb;
	int nsegs;
	int err;

	/* try to allocate a new spare mbuf */
	if (unlikely(ring->spare.mbuf == NULL)) {
		mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
		if (unlikely(mb == NULL))
			return (-ENOMEM);
		/* setup correct length */
		mb->m_len = ring->rx_mb_size;

		/* load spare mbuf into BUSDMA */
		err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		if (unlikely(err != 0)) {
			m_freem(mb);
			return (err);
		}

		/* store spare info */
		ring->spare.mbuf = mb;
		ring->spare.paddr_be = cpu_to_be64(segs[0].ds_addr);

		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* synchronize and unload the current mbuf, if any */
	if (likely(mb_list->mbuf != NULL)) {
		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
	}

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
	if (unlikely(mb == NULL))
		goto use_spare;

	/* setup correct length */
	mb->m_len = ring->rx_mb_size;

	err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely(err != 0)) {
		m_freem(mb);
		goto use_spare;
	}

	*pdma = cpu_to_be64(segs[0].ds_addr);
	mb_list->mbuf = mb;

	bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
	return (0);

use_spare:
	/* swap DMA maps */
	map = mb_list->dma_map;
	mb_list->dma_map = ring->spare.dma_map;
	ring->spare.dma_map = map;

	/* swap MBUFs */
	mb_list->mbuf = ring->spare.mbuf;
	ring->spare.mbuf = NULL;

	/* store physical address */
	*pdma = ring->spare.paddr_be;
	return (0);
}
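
/*
 * The per-ring "spare" mbuf used above is a pre-loaded fallback: when a fresh
 * cluster cannot be allocated or DMA-loaded, the ring entry takes over the
 * spare's mbuf and DMA map instead, so an RX slot is never left empty; a new
 * spare is then allocated on the next call.
 */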

static void
mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dmamap_t map = mb_list->dma_map;

	bus_dmamap_sync(ring->dma_tag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(ring->dma_tag, map);
	m_freem(mb_list->mbuf);
	mb_list->mbuf = NULL;		/* safety clearing */
}

static int
mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = (struct mlx4_en_rx_desc *)
	    (ring->buf + (index * ring->stride));
	struct mlx4_en_rx_mbuf *mb_list = ring->mbuf + index;

	mb_list->mbuf = NULL;

	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list)) {
		priv->port_stats.rx_alloc_failed++;
		return (-ENOMEM);
	}
	return (0);
}

static inline void
mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
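
/*
 * The doorbell record written above exposes the low 16 bits of the producer
 * counter to the hardware, telling it how many RX descriptors have been
 * posted and may be used for incoming packets.
 */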

static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind, buf_ind, new_size, err;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			err = mlx4_en_prepare_rx_desc(priv, ring,
			    ring->actual_size);
			if (err) {
				if (ring->actual_size == 0) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				}
				new_size = rounddown_pow_of_two(ring->actual_size);
				en_warn(priv, "Only %d buffers allocated, reducing ring size to %d\n",
				    ring->actual_size, new_size);
				goto reduce_rings;
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_buf(ring, ring->mbuf + ring->actual_size);
		}
	}
	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	    ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_buf(ring, ring->mbuf + index);
		++ring->cons;
	}
}

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;

	if (eff_mtu > MJUM16BYTES) {
		en_err(priv, "MTU(%d) is too big\n", (int)dev->if_mtu);
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUM9BYTES) {
		eff_mtu = MJUM16BYTES;
	} else if (eff_mtu > MJUMPAGESIZE) {
		eff_mtu = MJUM9BYTES;
	} else if (eff_mtu > MCLBYTES) {
		eff_mtu = MJUMPAGESIZE;
	} else {
		eff_mtu = MCLBYTES;
	}

	priv->rx_mb_size = eff_mtu;

	en_dbg(DRV, priv, "Effective RX MTU: %d bytes\n", eff_mtu);
}
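
/*
 * Example: a 9000-byte MTU gives eff_mtu = 9000 + 14 + 4 + 4 = 9022 bytes,
 * which is larger than MJUMPAGESIZE (one page, typically 4 KB) but fits in
 * MJUM9BYTES, so 9 KB jumbo clusters are used for the receive mbufs.
 */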

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring **pring, u32 size, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err, tmp;
	uint32_t x;

	ring = kzalloc(sizeof(struct mlx4_en_rx_ring), GFP_KERNEL);
	if (ring == NULL) {
		en_err(priv, "Failed to allocate RX ring structure\n");
		return -ENOMEM;
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,				/* any alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MJUM16BYTES,		/* maxsize */
	    1,				/* nsegments */
	    MJUM16BYTES,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &ring->dma_tag))) {
		en_err(priv, "Failed to create DMA tag\n");
		goto err_ring;
	}

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * sizeof(struct mlx4_en_rx_mbuf);

	ring->mbuf = kzalloc(tmp, GFP_KERNEL);
	if (ring->mbuf == NULL) {
		err = -ENOMEM;
		goto err_dma_tag;
	}

	err = -bus_dmamap_create(ring->dma_tag, 0, &ring->spare.dma_map);
	if (err != 0)
		goto err_info;

	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->mbuf[x].dma_map);
		if (err != 0) {
			while (x--)
				bus_dmamap_destroy(ring->dma_tag,
				    ring->mbuf[x].dma_map);
			goto err_spare;
		}
	}
	en_dbg(DRV, priv, "Allocated MBUF ring at addr:%p size:%d\n",
	    ring->mbuf, tmp);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
	    ring->buf_size, 2 * PAGE_SIZE);
	if (err)
		goto err_dma_map;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;
	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag,
		    ring->mbuf[x].dma_map);
err_spare:
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
err_info:
	kfree(ring->mbuf);
err_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
err_ring:
	kfree(ring);
	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(
	    sizeof(struct mlx4_en_rx_desc) + DS_SIZE);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
		ring->rx_alloc_order = priv->rx_alloc_order;
		ring->rx_alloc_size = priv->rx_alloc_size;
		ring->rx_buf_size = priv->rx_buf_size;
		ring->rx_mb_size = priv->rx_mb_size;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Configure LRO */
		if (priv->dev->if_capenable & IFCAP_LRO) {
			if (tcp_lro_init(&ring->lro))
				priv->dev->if_capenable &= ~IFCAP_LRO;
			else
				ring->lro.ifp = priv->dev;
		}
	}

	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}
	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		ring = priv->rx_ring[ring_ind];
		if (ring->stride <= TXBB_SIZE)
			ring->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return err;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring **pring,
    u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	uint32_t x;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->mbuf[x].dma_map);
	/* free spare mbuf, if any */
	if (ring->spare.mbuf != NULL) {
		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, ring->spare.dma_map);
		m_freem(ring->spare.mbuf);
	}
	bus_dmamap_destroy(ring->dma_tag, ring->spare.dma_map);
	kfree(ring->mbuf);
	bus_dma_tag_destroy(ring->dma_tag);
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv, ring);
#endif
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *ring)
{
	tcp_lro_free(&ring->lro);
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}

static void validate_loopback(struct mlx4_en_priv *priv, struct mbuf *mb)
{
	int i;
	int offset = ETHER_HDR_LEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(mb->m_data + offset) != (unsigned char)(i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	m_freem(mb);
}

static inline int invalid_cqe(struct mlx4_en_priv *priv,
    struct mlx4_cqe *cqe)
{
	/* Drop packet on bad receive or bad checksum */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
	    MLX4_CQE_OPCODE_ERROR)) {
		en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
		    ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
		    ((struct mlx4_err_cqe *)cqe)->syndrome);
		return 1;
	}
	if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
		en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
		return 1;
	}

	return 0;
}

static struct mbuf *
mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
    struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_mbuf *mb_list,
    int length)
{
	struct mbuf *mb;

	mb = mb_list->mbuf;

	/* collect used fragment while atomically replacing it */
	if (mlx4_en_alloc_buf(ring, &rx_desc->data[0].addr, mb_list))
		return (NULL);

	/* range check hardware computed value */
	if (unlikely(length > mb->m_len))
		length = mb->m_len;

	/* update total packet length in packet header */
	mb->m_len = mb->m_pkthdr.len = length;
	return (mb);
}

/* For CPU architectures with a 64-byte cache line, performance is better when
 * the CQE size is 64 bytes. To enlarge the CQE size from 32 to 64 bytes,
 * 32 bytes of garbage (i.e. 0xcccccccc) are placed at the beginning of each
 * CQE (the real data sits in the following 32 bytes). The calculation below
 * ensures that when factor == 1 we are aligned to 64 bytes and read the real
 * CQE data. */
#define CQE_FACTOR_INDEX(index, factor) ((index << factor) + factor)
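
/*
 * Example: with factor == 1 (64-byte CQEs viewed as an array of 32-byte
 * struct mlx4_cqe entries), CQE index 5 is read from buf[(5 << 1) + 1] ==
 * buf[11], i.e. the valid second half of the sixth 64-byte entry; with
 * factor == 0 the macro is an identity mapping.
 */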

int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_mbuf *mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct mbuf *mb;
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_cqe *buf = cq->buf;
	struct lro_entry *queued;
	int index;
	unsigned int length;
	int polled = 0;
	u32 cons_index = mcq->cons_index;
	u32 size_mask = ring->size_mask;
	int size = cq->size;
	int factor = priv->cqe_factor;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cons_index & size_mask;
	cqe = &buf[CQE_FACTOR_INDEX(index, factor)];

	/* Process all completed CQEs */
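	/*
	 * Note on the ownership test below: the hardware toggles the CQE
	 * owner bit each time it wraps around the CQ, so a CQE belongs to
	 * software exactly when its owner bit matches the wrap parity of
	 * cons_index, which is what the XNOR against (cons_index & size)
	 * checks.
	 */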
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
	    cons_index & size)) {
		mb_list = ring->mbuf + index;
		rx_desc = (struct mlx4_en_rx_desc *)
		    (ring->buf + (index << ring->log_stride));

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		if (invalid_cqe(priv, cqe)) {
			goto next;
		}
		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		mb = mlx4_en_rx_mb(priv, ring, rx_desc, mb_list, length);
		if (unlikely(mb == NULL)) {
			ring->errors++;
			goto next;
		}

		ring->bytes += length;
		ring->packets++;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);
			goto next;
		}

		mb->m_pkthdr.flowid = cq->ring;
		M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;
		}
		if (likely(dev->if_capenable &
		    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags =
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);
			/* This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - TCP/IP (v4)
			 * - without IP options
			 * - not an IP fragment
			 */
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)
					goto next;
			}

			/* LRO not possible, complete processing here */
			INC_PERF_COUNTER(priv->pstats.lro_misses);
		} else {
			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;
		}

		/* Push it up the stack */
		dev->if_input(dev, mb);

next:
		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[CQE_FACTOR_INDEX(index, factor)];
		if (++polled == budget)
			goto out;
	}

	/* Flush all pending IP reassembly sessions */
out:
	while ((queued = SLIST_FIRST(&ring->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&ring->lro.lro_active, next);
		tcp_lro_flush(&ring->lro, queued);
	}

	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();	/* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = mcq->cons_index;
	ring->prod += polled;	/* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}

/* Rx CQ polling - called by NAPI */
static int mlx4_en_poll_rx_cq(struct mlx4_en_cq *cq, int budget)
{
	struct net_device *dev = cq->dev;
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	return done;
}

void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	int done;

	/*
	 * Poll one budget's worth of CQEs directly in the irq context,
	 * because there is no NAPI in FreeBSD.
	 */
	done = mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET);
	if (priv->port_up && (done == MLX4_EN_RX_BUDGET)) {
		cq->curr_poll_rx_cpu_id = curcpu;
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	} else {
		mlx4_en_arm_cq(priv, cq);
	}
}

void mlx4_en_rx_que(void *context, int pending)
{
	struct mlx4_en_cq *cq;
	struct thread *td;

	cq = context;
	td = curthread;

	thread_lock(td);
	sched_bind(td, cq->curr_poll_rx_cpu_id);
	thread_unlock(td);

	while (mlx4_en_poll_rx_cq(cq, MLX4_EN_RX_BUDGET)
	    == MLX4_EN_RX_BUDGET)
		;
	mlx4_en_arm_cq(cq->dev->if_softc, cq);
}
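
/*
 * Taken together, RX interrupt handling works in two stages: mlx4_en_rx_irq()
 * polls one budget's worth of CQEs directly in interrupt context and, if that
 * budget was exhausted while the port is up, defers further polling to the
 * per-CQ taskqueue handler above (bound to the interrupted CPU) before the CQ
 * is finally re-armed.
 */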

/* RSS related functions */

static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
    struct mlx4_en_rx_ring *ring,
    enum mlx4_qp_state *state,
    struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (context == NULL) {
		en_err(priv, "Failed to allocate qp context\n");
		return -ENOMEM;
	}

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof(*context));
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
	    qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else {
		ring->fcs_del = 0;
	}

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;
	int err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate RX QPs and configure them according to the RSS map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
	    MLX4_RSS_TCP_IPV6);
	int i;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
	    0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
	    0x593D56D9, 0xF3253C06, 0x2ADC1FFC };
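
	/*
	 * The ten 32-bit words above form the driver's default 40-byte RSS
	 * hash key; hash_fn below is set to MLX4_RSS_HASH_TOP, i.e. a
	 * Toeplitz-style hash over the address/port fields selected in
	 * rss_mask.
	 */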
	en_dbg(DRV, priv, "Configuring RSS steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
	    priv->rx_ring_num, &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, priv->rx_ring[i]->qpn,
		    priv->rx_ring[i], &rss_map->state[i], &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
	    priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((u8 *)&context) + offsetof(struct mlx4_qp_context, pri_path) +
	    MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
	    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}
	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
	    &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
		    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
		    MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}