/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>

#include <net/ethernet.h>
#include <net/if_vlan_var.h>
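/*
 * Allocate an mbuf for fragment 'i' of an RX descriptor, DMA-map its data
 * buffer and store the bus address in the matching scatter entry of the
 * descriptor.  Returns 0 on success, non-zero if the allocation failed.
 */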
static int mlx4_en_alloc_buf(struct mlx4_en_priv *priv,
                             struct mlx4_en_rx_desc *rx_desc,
                             struct mbuf **mb_list,
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

        mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, frag_info->frag_size);
        mb = m_getjcl(M_NOWAIT, MT_DATA, 0, frag_info->frag_size);
                priv->port_stats.rx_alloc_failed++;

        dma = pci_map_single(mdev->pdev, mb->m_data, frag_info->frag_size,
        rx_desc->data[i].addr = cpu_to_be64(dma);
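/*
 * Write the scatter list of a single RX descriptor: one entry per active
 * fragment, with any remaining entries padded with a null address/size and
 * the special padding memory key so the HCA ignores them.
 */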
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
                                 struct mlx4_en_rx_ring *ring, int index)
        struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;

        /* Set size and memtype fields */
        for (i = 0; i < priv->num_frags; i++) {
                rx_desc->data[i].byte_count =
                        cpu_to_be32(priv->frag_info[i].frag_size);
                rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);

        /* If the number of used fragments does not fill up the ring stride,
         * remaining (unused) fragments must be padded with null address/size
         * and a special memory key */
        possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
        for (i = priv->num_frags; i < possible_frags; i++) {
                rx_desc->data[i].byte_count = 0;
                rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
                rx_desc->data[i].addr = 0;
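/*
 * Fill every fragment of the RX descriptor at 'index' with a freshly
 * allocated, DMA-mapped mbuf.  Fails if any fragment allocation fails.
 */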
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
                                   struct mlx4_en_rx_ring *ring, int index)
        struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
        struct mbuf **mb_list = ring->rx_info + (index << priv->log_rx_info);

        for (i = 0; i < priv->num_frags; i++)
                if (mlx4_en_alloc_buf(priv, rx_desc, mb_list, i))
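/*
 * Publish the current producer index in the doorbell record; the HCA reads
 * this value to learn how many receive WQEs have been posted.
 */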
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
        *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
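/*
 * Unmap and release every fragment mbuf attached to the RX descriptor at
 * 'index'.
 */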
static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
                                 struct mlx4_en_rx_ring *ring,
        struct mlx4_en_frag_info *frag_info;
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mbuf **mb_list;
        struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);

        mb_list = ring->rx_info + (index << priv->log_rx_info);
        for (nr = 0; nr < priv->num_frags; nr++) {
                en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
                frag_info = &priv->frag_info[nr];
                dma = be64_to_cpu(rx_desc->data[nr].addr);

                en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
                pci_unmap_single(mdev->pdev, dma, frag_info->frag_size,
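/*
 * Post receive buffers on all RX rings.  If allocation stops early, shrink
 * every ring to the largest power of two that could actually be filled and
 * free the descriptors beyond the new size.
 */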
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
        struct mlx4_en_rx_ring *ring;

        for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
                for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                        ring = &priv->rx_ring[ring_ind];

                        err = mlx4_en_prepare_rx_desc(priv, ring,

                        if (ring->actual_size == 0) {
                                en_err(priv, "Failed to allocate "
                                             "enough rx buffers\n");

                        new_size = rounddown_pow_of_two(ring->actual_size);
                        en_warn(priv, "Only %d buffers allocated "
                                      "reducing ring size to %d\n",
                                ring->actual_size, new_size);

        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                ring = &priv->rx_ring[ring_ind];
                while (ring->actual_size > new_size) {
                        mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
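/*
 * Release all buffers still posted on a ring, walking from the consumer to
 * the producer index.
 */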
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
                                struct mlx4_en_rx_ring *ring)
        en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
               ring->cons, ring->prod);

        /* Unmap and free Rx buffers */
        BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
        while (ring->cons != ring->prod) {
                index = ring->cons & ring->size_mask;
                en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
                mlx4_en_free_rx_desc(priv, ring, index);
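/*
 * Allocate the software and hardware resources of one RX ring: the rx_info
 * array that tracks the posted mbufs, and the HW work queue resources
 * (descriptor buffer and doorbell record), which are then mapped into the
 * kernel.
 */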
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_rx_ring *ring, u32 size)
        struct mlx4_en_dev *mdev = priv->mdev;

        ring->size_mask = size - 1;
        ring->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                          DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
        ring->log_stride = ffs(ring->stride) - 1;
        ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

        tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
                                        sizeof(struct mbuf *));

        ring->rx_info = kmalloc(tmp, GFP_KERNEL);
        if (!ring->rx_info) {
                en_err(priv, "Failed allocating rx_info ring\n");

        en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d stride:%d (%d)\n",
               ring->rx_info, tmp, ring->stride, ring->log_stride);

        err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
                                 ring->buf_size, 2 * PAGE_SIZE);

        err = mlx4_en_map_buffer(&ring->wqres.buf);
                en_err(priv, "Failed to map RX buffer\n");
        ring->buf = ring->wqres.buf.direct.buf;

        mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
        kfree(ring->rx_info);
        ring->rx_info = NULL;
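/*
 * Bring all RX rings into operational state: program stride and CQ number,
 * zero and initialize every descriptor, set up LRO if enabled, fill the
 * rings with buffers and ring the doorbells.
 */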
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
        struct mlx4_en_rx_ring *ring;
        int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                        DS_SIZE * priv->num_frags);

        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                ring = &priv->rx_ring[ring_ind];

                ring->actual_size = 0;
                ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
                ring->stride = stride;
                if (ring->stride <= TXBB_SIZE)
                        ring->buf += TXBB_SIZE;

                ring->log_stride = ffs(ring->stride) - 1;
                ring->buf_size = ring->size * ring->stride;

                memset(ring->buf, 0, ring->buf_size);
                mlx4_en_update_rx_prod_db(ring);

                /* Initialize all descriptors */
                for (i = 0; i < ring->size; i++)
                        mlx4_en_init_rx_desc(priv, ring, i);

                /* Configure the LRO manager */
                if (priv->dev->if_capenable & IFCAP_LRO) {
                        if (tcp_lro_init(&ring->lro))
                                priv->dev->if_capenable &= ~IFCAP_LRO;

                                ring->lro.ifp = priv->dev;

        err = mlx4_en_fill_rx_buffers(priv);

        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
                ring = &priv->rx_ring[ring_ind];

                ring->size_mask = ring->actual_size - 1;
                mlx4_en_update_rx_prod_db(ring);

        for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
                mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
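/*
 * Tear down the resources allocated by mlx4_en_create_rx_ring(): unmap the
 * descriptor buffer, free the HW queue resources and the rx_info array.
 */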
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_rx_ring *ring)
        struct mlx4_en_dev *mdev = priv->mdev;

        mlx4_en_unmap_buffer(&ring->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
        kfree(ring->rx_info);
        ring->rx_info = NULL;
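/*
 * Quiesce a ring: free the LRO state, release all posted buffers and undo
 * the TXBB_SIZE offset applied when the ring was activated.
 */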
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_rx_ring *ring)
        tcp_lro_free(&ring->lro);
        mlx4_en_free_rx_buf(priv, ring);
        if (ring->stride <= TXBB_SIZE)
                ring->buf -= TXBB_SIZE;
/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                                    struct mlx4_en_rx_desc *rx_desc,
                                    struct mbuf **mb_list,
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_frag_info *frag_info;

        mb->m_pkthdr.len = length;
        /* Collect used fragments while replacing them in the HW descriptors */
        for (nr = 0; nr < priv->num_frags; nr++) {
                frag_info = &priv->frag_info[nr];
                if (length <= frag_info->frag_prefix_size)

                mb->m_next = mb_list[nr];
                mb->m_len = frag_info->frag_size;
                dma = be64_to_cpu(rx_desc->data[nr].addr);

                /* Allocate a replacement page */
                if (mlx4_en_alloc_buf(priv, rx_desc, mb_list, nr))

                pci_unmap_single(mdev->pdev, dma, frag_info->frag_size,

        /* Adjust size of last fragment to match actual length */
        mb->m_len = length - priv->frag_info[nr - 1].frag_prefix_size;

        /* Drop all accumulated fragments (which have already been replaced in
         * the descriptor) of this packet; remaining fragments are reused... */
static inline int invalid_cqe(struct mlx4_en_priv *priv,
                              struct mlx4_cqe *cqe)
        /* Drop packet on bad receive or bad checksum */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                     MLX4_CQE_OPCODE_ERROR)) {
                en_err(priv, "CQE completed in error - vendor "
                             "syndrome:%d syndrome:%d\n",
                       ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
                       ((struct mlx4_err_cqe *) cqe)->syndrome);

        if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
                en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
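/*
 * Verify the payload of a received self-test frame; each payload byte must
 * equal its offset modulo 256.  Sets priv->loopback_ok when the frame
 * matches.
 */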
static void validate_loopback(struct mlx4_en_priv *priv, struct mbuf *mb)
        int offset = ETHER_HDR_LEN;

        for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
                if (*(mb->m_data + offset) != (unsigned char) (i & 0xff))

        priv->loopback_ok = 1;
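/*
 * Build an mbuf chain for a completed receive: collect the fragments that
 * hold packet data and replace them in the descriptor so it can be reposted.
 */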
static struct mbuf *mlx4_en_rx_mb(struct mlx4_en_priv *priv,
                                  struct mlx4_en_rx_desc *rx_desc,
                                  struct mbuf **mb_list,
        /* Move relevant fragments to mb */
        if (unlikely(mlx4_en_complete_rx_desc(priv, rx_desc, mb_list, length)))
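/*
 * Main RX completion handler: drain up to 'budget' CQEs from the completion
 * queue, build mbufs for good packets, handle checksum offload, VLAN tag
 * extraction and LRO, and hand the packets to the network stack.  Processed
 * descriptors are reposted in place and the doorbells updated on exit.
 */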
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cqe *cqe;
        struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
        struct mbuf **mb_list;
        struct mlx4_en_rx_desc *rx_desc;
        struct lro_entry *queued;

        /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
         * descriptor offset can be deduced from the CQE index instead of
         * reading 'cqe->index' */
        index = cq->mcq.cons_index & ring->size_mask;
        cqe = &cq->buf[index];

        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                    cq->mcq.cons_index & cq->size)) {

                mb_list = ring->rx_info + (index << priv->log_rx_info);
                rx_desc = ring->buf + (index << ring->log_stride);

                /*
                 * make sure we read the CQE after we read the ownership bit
                 */
                if (invalid_cqe(priv, cqe))

                /*
                 * Packet is OK - process it.
                 */
                length = be32_to_cpu(cqe->byte_cnt);
                mb = mlx4_en_rx_mb(priv, rx_desc, mb_list, length);

                ring->bytes += length;

                if (unlikely(priv->validate_loopback)) {
                        validate_loopback(priv, mb);

                mb->m_pkthdr.flowid = cq->ring;
                mb->m_flags |= M_FLOWID;
                mb->m_pkthdr.rcvif = dev;
                if (be32_to_cpu(cqe->vlan_my_qpn) &
                    MLX4_CQE_VLAN_PRESENT_MASK) {
                        mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
                        mb->m_flags |= M_VLANTAG;

                if (likely(priv->rx_csum) &&
                    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
                    (cqe->checksum == cpu_to_be16(0xffff))) {
                        priv->port_stats.rx_chksum_good++;
                        mb->m_pkthdr.csum_flags =
                            CSUM_IP_CHECKED | CSUM_IP_VALID |
                            CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                        mb->m_pkthdr.csum_data = htons(0xffff);
                        /* This packet is eligible for LRO if it is:
                         * - DIX Ethernet (type interpretation)
                         * - without IP options
                         * - not an IP fragment
                        if (mlx4_en_can_lro(cqe->status) &&
                            (dev->if_capenable & IFCAP_LRO)) {
                                if (ring->lro.lro_cnt != 0 &&
                                    tcp_lro_rx(&ring->lro, mb, 0) == 0)

                                /* LRO not possible, complete processing here */
                                INC_PERF_COUNTER(priv->pstats.lro_misses);

                        mb->m_pkthdr.csum_flags = 0;
                        priv->port_stats.rx_chksum_none++;
                        if (priv->ip_reasm &&
                            cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4) &&
                            !mlx4_en_rx_frags(priv, ring, mb, cqe))

                /* Push it up the stack */
                dev->if_input(dev, mb);

                ++cq->mcq.cons_index;
                index = (cq->mcq.cons_index) & ring->size_mask;
                cqe = &cq->buf[index];
                if (++polled == budget)

        /* Flush all pending IP reassembly sessions */
        mlx4_en_flush_frags(priv, ring);
        while ((queued = SLIST_FIRST(&ring->lro.lro_active)) != NULL) {
                SLIST_REMOVE_HEAD(&ring->lro.lro_active, next);
                tcp_lro_flush(&ring->lro, queued);

        AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
        mlx4_cq_set_ci(&cq->mcq);
        wmb(); /* ensure HW sees CQ consumer before we post new buffers */
        ring->cons = cq->mcq.cons_index;
        ring->prod += polled; /* Polled descriptors were reallocated in place */
        mlx4_en_update_rx_prod_db(ring);
/* Rx CQ polling - called by NAPI */
static int mlx4_en_poll_rx_cq(struct mlx4_en_cq *cq, int budget)
        struct net_device *dev = cq->dev;

        done = mlx4_en_process_rx_cq(dev, cq, budget);
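/*
 * Taskqueue handler for deferred RX processing: keep polling the CQ while a
 * full budget is consumed, then re-arm the completion interrupt.
 */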
void mlx4_en_rx_que(void *context, int pending)
        struct mlx4_en_cq *cq;

        while (mlx4_en_poll_rx_cq(cq, MLX4_EN_MAX_RX_POLL)
               == MLX4_EN_MAX_RX_POLL);
        mlx4_en_arm_cq(cq->dev->if_softc, cq);
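/*
 * Completion event callback for an RX CQ: poll a limited budget inline and
 * defer the remainder to the per-CQ taskqueue if more work remains;
 * otherwise re-arm the CQ.
 */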
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);

        done = mlx4_en_poll_rx_cq(cq, MLX4_EN_MAX_RX_POLL);
        if (done == MLX4_EN_MAX_RX_POLL)
                taskqueue_enqueue(cq->tq, &cq->cq_task);
        mlx4_en_arm_cq(priv, cq);
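/*
 * Candidate fragment sizes for the RX scatter list; mlx4_en_calc_rx_buf()
 * picks entries from this table until the effective MTU is covered.
 */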
#if MLX4_EN_MAX_RX_FRAGS == 3
static int frag_sizes[] = {
#elif MLX4_EN_MAX_RX_FRAGS == 2
static int frag_sizes[] = {
#error "Unknown MAX_RX_FRAGS"
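/*
 * Compute the RX buffer layout for the current MTU: the effective MTU
 * includes the Ethernet, VLAN and LLC/SNAP headers and is split across up
 * to MLX4_EN_MAX_RX_FRAGS fragments chosen from frag_sizes[].
 */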
void mlx4_en_calc_rx_buf(struct net_device *dev)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int eff_mtu = dev->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETH_LLC_SNAP_SIZE;

        for (i = 0, frag = 0; buf_size < eff_mtu; frag++, i++) {
                /*
                 * Allocate small to large but only as much as is needed for
                 */
                while (i > 0 && eff_mtu - buf_size <= frag_sizes[i - 1])
                priv->frag_info[frag].frag_size = frag_sizes[i];
                priv->frag_info[frag].frag_prefix_size = buf_size;
                buf_size += priv->frag_info[frag].frag_size;

        priv->num_frags = frag;
        priv->rx_mb_size = eff_mtu;
            ROUNDUP_LOG2(priv->num_frags * sizeof(struct mbuf *));

        en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
                          "num_frags:%d):\n", eff_mtu, priv->num_frags);
        for (i = 0; i < priv->num_frags; i++) {
                en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d\n", i,
                       priv->frag_info[i].frag_size,
                       priv->frag_info[i].frag_prefix_size);
/* RSS related functions */
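/*
 * Create one RX QP for the RSS map, fill its context from the ring
 * parameters and bring it to the ready state.
 */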
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
                                 struct mlx4_en_rx_ring *ring,
                                 enum mlx4_qp_state *state,
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_qp_context *context;

        context = kmalloc(sizeof *context, GFP_KERNEL);
                en_err(priv, "Failed to allocate qp context\n");

        err = mlx4_qp_alloc(mdev->dev, qpn, qp);
                en_err(priv, "Failed to allocate qp #%x\n", qpn);
        qp->event = mlx4_en_sqp_event;

        memset(context, 0, sizeof *context);
        mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
                                qpn, ring->cqn, context);
        context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
                mlx4_qp_remove(mdev->dev, qp);
                mlx4_qp_free(mdev->dev, qp);

        mlx4_en_update_rx_prod_db(ring);
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rss_map *rss_map = &priv->rss_map;
        struct mlx4_qp_context context;
        struct mlx4_en_rss_context *rss_context;

        if (mdev->profile.udp_rss)

        en_dbg(DRV, priv, "Configuring rss steering\n");
        err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
                                    roundup_pow_of_two(priv->rx_ring_num),
                en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);

        for (i = 0; i < priv->rx_ring_num; i++) {
                qpn = rss_map->base_qpn + i;
                err = mlx4_en_config_rss_qp(priv, qpn,

        /* Configure RSS indirection qp */
        err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
                en_err(priv, "Failed to reserve range for RSS "

        err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
                en_err(priv, "Failed to allocate RSS indirection QP\n");

        rss_map->indir_qp.event = mlx4_en_sqp_event;
        mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
                                priv->rx_ring[0].cqn, &context);

        ptr = ((void *) &context) + 0x3c;
        rss_context = (struct mlx4_en_rss_context *) ptr;
        rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
                                            (rss_map->base_qpn));
        rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
        rss_context->flags = rss_mask;
        rss_context->base_qpn_udp = rss_context->default_qpn;

        err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
                               &rss_map->indir_qp, &rss_map->indir_state);

        mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
        mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
        mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
        mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);

        for (i = 0; i < good_qps; i++) {
                mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
                               MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
                mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
                mlx4_qp_free(mdev->dev, &rss_map->qps[i]);

        mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
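/*
 * Release everything set up by mlx4_en_config_rss_steer(): the RSS
 * indirection QP and all per-ring RX QPs, together with their reserved QP
 * number ranges.
 */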
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rss_map *rss_map = &priv->rss_map;

        mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
        mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
        mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
        mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);

        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
                               MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
                mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
                mlx4_qp_free(mdev->dev, &rss_map->qps[i]);

        mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);