/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>

#include <net/ethernet.h>
#include <net/if_vlan_var.h>
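
/*
 * Allocate and DMA-map one receive fragment.
 *
 * Fragment 'i' of a packet gets its own mbuf cluster of
 * priv->frag_info[i].frag_size bytes; the bus address returned by
 * pci_map_single() is written (big endian) into the matching scatter
 * entry of the hardware Rx descriptor, and the mbuf is tracked through
 * mb_list[] so it can be handed to the stack on completion.  Failed
 * allocations are counted in port_stats.rx_alloc_failed.
 */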
static int mlx4_en_alloc_buf(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_desc *rx_desc,
			     struct mbuf **mb_list,
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, frag_info->frag_size);
	mb = m_getjcl(M_NOWAIT, MT_DATA, 0, frag_info->frag_size);
	priv->port_stats.rx_alloc_failed++;
	dma = pci_map_single(mdev->pdev, mb->m_data, frag_info->frag_size,
	rx_desc->data[i].addr = cpu_to_be64(dma);
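
/*
 * Write the static part of an Rx descriptor: one scatter entry
 * (size + lkey) per configured fragment, plus MLX4_EN_MEMTYPE_PAD
 * entries with a null address/size for the unused slots that pad the
 * descriptor out to the ring stride.
 */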
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
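
/*
 * Populate one Rx descriptor with freshly allocated buffers, calling
 * mlx4_en_alloc_buf() once per fragment.
 */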
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index)
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mbuf **mb_list = ring->rx_info + (index << priv->log_rx_info);

	for (i = 0; i < priv->num_frags; i++)
		if (mlx4_en_alloc_buf(priv, rx_desc, mb_list, i))
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
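
/*
 * Release the buffers attached to one Rx descriptor: unmap the DMA
 * address recorded in each scatter entry and free the mbufs listed in
 * rx_info for that descriptor index.
 */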
static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
	struct mlx4_en_frag_info *frag_info;
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mbuf **mb_list;
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);

	mb_list = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		frag_info = &priv->frag_info[nr];
		dma = be64_to_cpu(rx_desc->data[nr].addr);
		en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
		pci_unmap_single(mdev->pdev, dma, frag_info->frag_size,
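
/*
 * Fill all Rx rings with receive buffers.  Descriptors are populated
 * one index at a time across every ring; if allocation stops early,
 * the ring is shrunk to the largest power of two that could actually
 * be filled and the surplus descriptors are freed again.
 */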
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
	struct mlx4_en_rx_ring *ring;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];
			err = mlx4_en_prepare_rx_desc(priv, ring,
				if (ring->actual_size == 0) {
					en_err(priv, "Failed to allocate "
						     "enough rx buffers\n");
				new_size = rounddown_pow_of_two(ring->actual_size);
				en_warn(priv, "Only %d buffers allocated, "
					      "reducing ring size to %d\n",
					ring->actual_size, new_size);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
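
/*
 * Allocate the software and hardware resources of one Rx ring: the
 * rx_info array tracking the mbufs posted to each descriptor and the
 * HW queue (wqres) that backs the descriptor buffer.  The stride is
 * rounded up to a power of two so descriptor offsets can be computed
 * with shifts (index << log_stride).
 */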
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring *ring, u32 size)
	struct mlx4_en_dev *mdev = priv->mdev;

	ring->size_mask = size - 1;
	ring->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mbuf *));
	ring->rx_info = kmalloc(tmp, GFP_KERNEL);
	if (!ring->rx_info) {
		en_err(priv, "Failed allocating rx_info ring\n");

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d stride:%d (%d)\n",
	       ring->rx_info, tmp, ring->stride, ring->log_stride);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);

	err = mlx4_en_map_buffer(&ring->wqres.buf);
		en_err(priv, "Failed to map RX buffer\n");
	ring->buf = ring->wqres.buf.direct.buf;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->rx_info);
	ring->rx_info = NULL;
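
/*
 * Bring all Rx rings to an operational state: attach each ring to its
 * completion queue, initialize every descriptor, optionally enable
 * LRO, then post receive buffers and ring the doorbell so the
 * hardware can start placing packets.
 */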
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
	struct mlx4_en_rx_ring *ring;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Configure LRO */
		if (priv->dev->if_capenable & IFCAP_LRO) {
			if (tcp_lro_init(&ring->lro))
				priv->dev->if_capenable &= ~IFCAP_LRO;

			ring->lro.ifp = priv->dev;

	err = mlx4_en_fill_rx_buffers(priv);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = &priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *ring)
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
	kfree(ring->rx_info);
	ring->rx_info = NULL;
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
	tcp_lro_free(&ring->lro);
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mbuf **mb_list,
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info;

	mb->m_pkthdr.len = length;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)

		mb->m_next = mb_list[nr];
		mb->m_len = frag_info->frag_size;
		dma = be64_to_cpu(rx_desc->data[nr].addr);

		/* Allocate a replacement page */
		if (mlx4_en_alloc_buf(priv, rx_desc, mb_list, nr))

		pci_unmap_single(mdev->pdev, dma, frag_info->frag_size,

	/* Adjust size of last fragment to match actual length */
	mb->m_len = length - priv->frag_info[nr - 1].frag_prefix_size;

	/* Drop all accumulated fragments (which have already been replaced in
	 * the descriptor) of this packet; remaining fragments are reused... */
static inline int invalid_cqe(struct mlx4_en_priv *priv,
			      struct mlx4_cqe *cqe)
	/* Drop packet on bad receive or bad checksum */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		     MLX4_CQE_OPCODE_ERROR)) {
		en_err(priv, "CQE completed in error - vendor "
			     "syndrome:%d syndrome:%d\n",
		       ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
		       ((struct mlx4_err_cqe *) cqe)->syndrome);

	if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
		en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
static void validate_loopback(struct mlx4_en_priv *priv, struct mbuf *mb)
	int offset = ETHER_HDR_LEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(mb->m_data + offset) != (unsigned char) (i & 0xff))

	priv->loopback_ok = 1;
static struct mbuf *mlx4_en_rx_mb(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_desc *rx_desc,
				  struct mbuf **mb_list,
	/* Move relevant fragments to mb */
	if (unlikely(mlx4_en_complete_rx_desc(priv, rx_desc, mb_list, length)))
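
/*
 * Main Rx completion processing loop.  For every completed CQE the
 * matching descriptor's fragments are collected into an mbuf chain
 * (and replaced in the descriptor), checksum and VLAN information from
 * the CQE is translated into mbuf metadata, and the packet is handed
 * to LRO/IP reassembly or pushed up the stack via if_input().
 * Processing stops once 'budget' packets have been handled.
 */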
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct mbuf **mb_list;
	struct mlx4_en_rx_desc *rx_desc;
	struct lro_entry *queued;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		mb_list = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */

		if (invalid_cqe(priv, cqe))

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		mb = mlx4_en_rx_mb(priv, rx_desc, mb_list, length);
		ring->bytes += length;

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, mb);

		mb->m_pkthdr.flowid = cq->ring;
		mb->m_flags |= M_FLOWID;
		mb->m_pkthdr.rcvif = dev;
		if (be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) {
			mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->sl_vid);
			mb->m_flags |= M_VLANTAG;

		if (likely(priv->rx_csum) &&
		    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
		    (cqe->checksum == cpu_to_be16(0xffff))) {
			priv->port_stats.rx_chksum_good++;
			mb->m_pkthdr.csum_flags =
			    CSUM_IP_CHECKED | CSUM_IP_VALID |
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			mb->m_pkthdr.csum_data = htons(0xffff);

			/* This packet is eligible for LRO if it is:
			 * - DIX Ethernet (type interpretation)
			 * - without IP options
			 * - not an IP fragment
			 */
			if (mlx4_en_can_lro(cqe->status) &&
			    (dev->if_capenable & IFCAP_LRO)) {
				if (ring->lro.lro_cnt != 0 &&
				    tcp_lro_rx(&ring->lro, mb, 0) == 0)

				/* LRO not possible, complete processing here */
				INC_PERF_COUNTER(priv->pstats.lro_misses);

			mb->m_pkthdr.csum_flags = 0;
			priv->port_stats.rx_chksum_none++;
			if (priv->ip_reasm &&
			    cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4) &&
			    !mlx4_en_rx_frags(priv, ring, mb, cqe))

		/* Push it up the stack */
		dev->if_input(dev, mb);
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget)

	/* Flush all pending IP reassembly sessions */
	mlx4_en_flush_frags(priv, ring);
	while ((queued = SLIST_FIRST(&ring->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&ring->lro.lro_active, next);
		tcp_lro_flush(&ring->lro, queued);

	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were reallocated in place */
	mlx4_en_update_rx_prod_db(ring);
/* Rx CQ polling - called from the interrupt handler and the Rx taskqueue */
static int mlx4_en_poll_rx_cq(struct mlx4_en_cq *cq, int budget)
	struct net_device *dev = cq->dev;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

void mlx4_en_rx_que(void *context, int pending)
	struct mlx4_en_cq *cq;

	while (mlx4_en_poll_rx_cq(cq, MLX4_EN_MAX_RX_POLL)
	       == MLX4_EN_MAX_RX_POLL);
	mlx4_en_arm_cq(cq->dev->if_softc, cq);
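
/*
 * Rx completion interrupt handler: poll a bounded amount of work in
 * interrupt context and, if the budget was exhausted, defer the rest
 * to the per-CQ taskqueue (mlx4_en_rx_que); otherwise re-arm the CQ.
 */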
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	done = mlx4_en_poll_rx_cq(cq, MLX4_EN_MAX_RX_POLL);
	if (done == MLX4_EN_MAX_RX_POLL)
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	else
		mlx4_en_arm_cq(priv, cq);
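
/*
 * Fragment sizes used to build the Rx scatter list; the number of
 * entries must match MLX4_EN_MAX_RX_FRAGS.  mlx4_en_calc_rx_buf()
 * below walks this table to cover the effective MTU.
 */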
#if MLX4_EN_MAX_RX_FRAGS == 3
static int frag_sizes[] = {
#elif MLX4_EN_MAX_RX_FRAGS == 2
static int frag_sizes[] = {
#error "Unknown MAX_RX_FRAGS"
void mlx4_en_calc_rx_buf(struct net_device *dev)
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
		      ETH_LLC_SNAP_SIZE;

	for (i = 0, frag = 0; buf_size < eff_mtu; frag++, i++) {
		/*
		 * Allocate small to large but only as much as is needed for
		 * the remainder of the packet.
		 */
		while (i > 0 && eff_mtu - buf_size <= frag_sizes[i - 1])
			i--;
		priv->frag_info[frag].frag_size = frag_sizes[i];
		priv->frag_info[frag].frag_prefix_size = buf_size;
		buf_size += priv->frag_info[frag].frag_size;

	priv->num_frags = frag;
	priv->rx_mb_size = eff_mtu;
	priv->log_rx_info =
	    ROUNDUP_LOG2(priv->num_frags * sizeof(struct mbuf *));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
			  "num_frags:%d):\n", eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d\n", i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size);
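
/*
 * Worked example (hypothetical numbers - the real values live in
 * frag_sizes[] above): with frag_sizes[] = { 1536, 4096, 4096 } and a
 * 9000-byte if_mtu, eff_mtu is 9026 (MTU plus Ethernet, VLAN and
 * LLC/SNAP headers) and the loop above yields
 *
 *	frag 0: size 1536, prefix_size    0
 *	frag 1: size 4096, prefix_size 1536
 *	frag 2: size 4096, prefix_size 5632
 *
 * frag_prefix_size is the number of packet bytes carried by the
 * preceding fragments; mlx4_en_complete_rx_desc() compares the CQE
 * byte count against it to decide how many fragments a packet used.
 */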
/* RSS related functions */
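
/*
 * Create one Rx QP, attach it to the given ring's CQ and doorbell
 * record, and move it to the ready state.  One such QP is created per
 * Rx ring; the RSS indirection QP built in mlx4_en_config_rss_steer()
 * spreads incoming flows across them.
 */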
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;

	context = kmalloc(sizeof *context, GFP_KERNEL);
		en_err(priv, "Failed to allocate qp context\n");

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	mlx4_en_update_rx_prod_db(ring);
/* Allocate Rx QPs and configure them according to the RSS map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_en_rss_context *rss_context;

	if (mdev->profile.udp_rss)

	en_dbg(DRV, priv, "Configuring RSS steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    roundup_pow_of_two(priv->rx_ring_num),
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn,

	/* Configure RSS indirection qp */
	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
		en_err(priv, "Failed to reserve range for RSS "

	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
		en_err(priv, "Failed to allocate RSS indirection QP\n");

	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, &context);

	ptr = ((void *) &context) + 0x3c;
	rss_context = (struct mlx4_en_rss_context *) ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	rss_context->flags = rss_mask;
	rss_context->base_qpn_udp = rss_context->default_qpn;

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
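
/*
 * Tear down the RSS state built by mlx4_en_config_rss_steer(): bring
 * the indirection QP and every per-ring QP back to reset, free them,
 * and release the reserved QP number ranges.
 */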
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);