/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ipoib.h"

#include <rdma/ib_cache.h>

#include <security/mac/mac_framework.h>

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);

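/*
 * Address handle (AH) lifecycle: AHs are reference counted, and the
 * final kref_put() does not destroy an AH directly.  Instead,
 * ipoib_free_ah() moves it onto the dead_ahs list, and the reaper
 * work item destroys it once the send queue has advanced past the
 * last send that referenced it (see __ipoib_reap_ah() below).
 */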
struct ipoib_ah *ipoib_create_ah(struct ipoib_dev_priv *priv,
				 struct ib_pd *pd, struct ib_ah_attr *attr)
{
	struct ipoib_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->priv = priv;
	ah->last_send = 0;
	kref_init(&ah->ref);

	ah->ah = ib_create_ah(pd, attr);
	if (IS_ERR(ah->ah)) {
		kfree(ah);
		ah = NULL;
	} else
		ipoib_dbg(priv, "Created ah %p\n", ah->ah);

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = ah->priv;

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

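/*
 * Receive buffers are mbuf chains; each fragment of a chain gets its
 * own DMA mapping, recorded per-fragment in rx_req->mapping[], so the
 * map, unmap and post paths all walk the chain in step.
 */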
void
ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req)
{
	struct mbuf *m;
	int i;

	for (i = 0, m = rx_req->mb; m != NULL; m = m->m_next, i++)
		ib_dma_unmap_single(priv->ca, rx_req->mapping[i], m->m_len,
		    DMA_FROM_DEVICE);
}

void
ipoib_dma_mb(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int length)
{

	m_adj(mb, -(mb->m_pkthdr.len - length));
}

static struct mbuf *
ipoib_alloc_map_mb(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req,
    int align, int size, int max_frags)
{
	struct mbuf *mb, *m;
	int i, j;

	rx_req->mb = NULL;
	mb = m_getm2(NULL, align + size, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (mb == NULL)
		return (NULL);
	for (i = 0, m = mb; m != NULL; m = m->m_next, i++) {
		MPASS(i < max_frags);

		m->m_len = M_SIZE(m) - align;
		m->m_data += align;
		align = 0;
		mb->m_pkthdr.len += m->m_len;
		rx_req->mapping[i] = ib_dma_map_single(priv->ca,
		    mtod(m, void *), m->m_len, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca,
		    rx_req->mapping[i])))
			goto error;
	}
	rx_req->mb = mb;
	return (mb);

error:
	for (j = 0, m = mb; j < i; m = m->m_next, j++)
		ib_dma_unmap_single(priv->ca, rx_req->mapping[j], m->m_len,
		    DMA_FROM_DEVICE);
	m_freem(mb);
	return (NULL);
}

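/*
 * Post a single receive work request.  The ring index is encoded in
 * the work request ID together with the IPOIB_OP_RECV flag, so the
 * completion path can tell receives from sends and recover the ring
 * slot directly from wc->wr_id.
 */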
static int ipoib_ib_post_receive(struct ipoib_dev_priv *priv, int id)
{
	struct ipoib_rx_buf *rx_req;
	const struct ib_recv_wr *bad_wr;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = &priv->rx_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->rx_sge[i].addr = rx_req->mapping[i];
		priv->rx_sge[i].length = m->m_len;
	}
	priv->rx_wr.num_sge = i;
	priv->rx_wr.wr_id = id | IPOIB_OP_RECV;

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, &priv->rx_ring[id]);
		m_freem(priv->rx_ring[id].mb);
		priv->rx_ring[id].mb = NULL;
	}

	return ret;
}

static struct mbuf *
ipoib_alloc_rx_mb(struct ipoib_dev_priv *priv, int id)
{

	return ipoib_alloc_map_mb(priv, &priv->rx_ring[id],
	    0, priv->max_ib_mtu + IB_GRH_BYTES, IPOIB_UD_RX_SG);
}

static int ipoib_ib_post_receives(struct ipoib_dev_priv *priv)
{
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_mb(priv, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(priv, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

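/*
 * Handle one receive completion: unmap the buffer, pass the mbuf up
 * the stack, and repost a fresh buffer into the same ring slot.  If a
 * replacement buffer cannot be allocated, the received data is dropped
 * and the old buffer is recycled, so the receive ring never shrinks
 * under memory pressure.
 */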
static void
ipoib_ib_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_rx_buf saverx;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct ifnet *dev = priv->dev;
	struct ipoib_header *eh;
	struct mbuf *mb;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	mb = priv->rx_ring[wr_id].mb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		}
		ipoib_dma_unmap_rx(priv, &priv->rx_ring[wr_id]);
		m_freem(mb);
		priv->rx_ring[wr_id].mb = NULL;
		return;
	}

	/*
	 * Drop packets that this interface sent, ie multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
		goto repost;

	memcpy(&saverx, &priv->rx_ring[wr_id], sizeof(saverx));
	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_mb(priv, wr_id))) {
		memcpy(&priv->rx_ring[wr_id], &saverx, sizeof(saverx));
		if_inc_counter(dev, IFCOUNTER_IQDROPS, 1);
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_dma_unmap_rx(priv, &saverx);
	ipoib_dma_mb(priv, mb, wc->byte_len);

	if_inc_counter(dev, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(dev, IFCOUNTER_IBYTES, mb->m_pkthdr.len);
	mb->m_pkthdr.rcvif = dev;
	m_adj(mb, sizeof(struct ib_grh) - INFINIBAND_ALEN);
	eh = mtod(mb, struct ipoib_header *);
	bzero(eh->hwaddr, 4);	/* Zero the queue pair, only dgid is in grh */

	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		mb->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

	dev->if_input(dev, mb);

repost:
	if (unlikely(ipoib_ib_post_receive(priv, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

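/*
 * Map a transmit mbuf chain for DMA.  Zero-length mbufs are pruned
 * first since they would waste scatter/gather entries; if the chain
 * still has more fragments than the QP allows (max), it is compacted
 * with m_defrag() before mapping.
 */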
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req, int max)
{
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m, *p;
	int error;
	int i;

	for (m = mb, p = NULL, i = 0; m != NULL; p = m, m = m->m_next, i++) {
		if (m->m_len != 0)
			continue;
		if (p == NULL)
			panic("ipoib_dma_map_tx: First mbuf empty\n");
		p->m_next = m_free(m);
		m = p;
		i--;
	}
	if (i >= max) {
		tx_req->mb = mb = m_defrag(mb, M_NOWAIT);
		if (mb == NULL)
			return -EIO;
		for (m = mb, i = 0; m != NULL; m = m->m_next, i++);
		if (i >= max)
			return -EIO;
	}
	error = 0;
	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		mapping[i] = ib_dma_map_single(ca, mtod(m, void *),
					       m->m_len, DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i]))) {
			error = -EIO;
			break;
		}
	}
	if (error) {
		int end;

		end = i;
		for (m = mb, i = 0; i < end; m = m->m_next, i++)
			ib_dma_unmap_single(ca, mapping[i], m->m_len,
					    DMA_TO_DEVICE);
	}
	return error;
}

void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++)
		ib_dma_unmap_single(ca, mapping[i], m->m_len, DMA_TO_DEVICE);
}

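/*
 * Handle one send completion.  Note the hysteresis on restarting the
 * interface queue: IFF_DRV_OACTIVE is cleared only once the number of
 * outstanding sends has drained to half the send queue size, which
 * avoids toggling the flag on every completion while the ring is
 * nearly full.
 */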
static void ipoib_ib_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ifnet *dev = priv->dev;
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, tx_req);

	if_inc_counter(dev, IFCOUNTER_OPACKETS, 1);

	m_freem(tx_req->mb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
}

static int
ipoib_poll_tx(struct ipoib_dev_priv *priv, bool do_start)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i) {
		struct ib_wc *wc = priv->send_wc + i;
		if (wc->wr_id & IPOIB_OP_CM)
			ipoib_cm_handle_tx_wc(priv, wc);
		else
			ipoib_ib_handle_tx_wc(priv, wc);
	}

	if (do_start && n != 0)
		ipoib_start_locked(priv->dev, priv);

	return n == MAX_SEND_CQE;
}

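/*
 * Poll the receive CQ until it is empty.  The drain_lock serializes
 * this path against ipoib_drain_cq(), so normal completion processing
 * cannot race with device shutdown.
 */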
static void
ipoib_poll(struct ipoib_dev_priv *priv)
{
	int n, i;

	spin_lock(&priv->drain_lock);
	for (;;) {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if ((wc->wr_id & IPOIB_OP_RECV) == 0)
				panic("ipoib_poll: Bad wr_id 0x%jX\n",
				    (intmax_t)wc->wr_id);
			if (wc->wr_id & IPOIB_OP_CM)
				ipoib_cm_handle_rx_wc(priv, wc);
			else
				ipoib_ib_handle_rx_wc(priv, wc);
		}

		if (n != IPOIB_NUM_WC)
			break;
	}
	spin_unlock(&priv->drain_lock);

	if (ib_req_notify_cq(priv->recv_cq,
	    IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0)
		ipoib_poll(priv);
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = dev_ptr;

	ipoib_poll(priv);
}

static void drain_tx_cq(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	spin_lock(&priv->lock);
	while (ipoib_poll_tx(priv, true))
		;	/* nothing */

	if (dev->if_drv_flags & IFF_DRV_OACTIVE)
		mod_timer(&priv->poll_timer, jiffies + 1);

	spin_unlock(&priv->lock);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = dev_ptr;

	mod_timer(&priv->poll_timer, jiffies);
}

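/*
 * Translate an mbuf chain into the scatter/gather list of a send work
 * request and post it.  A non-NULL header pointer selects an LSO work
 * request; note that the LSO path is currently compiled out in
 * ipoib_send() (see the XXX comments there).
 */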
static inline int
post_send(struct ipoib_dev_priv *priv, unsigned int wr_id,
    struct ib_ah *address, u32 qpn, struct ipoib_tx_buf *tx_req, void *head,
    int hlen)
{
	const struct ib_send_wr *bad_wr;
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->tx_sge[i].addr = mapping[i];
		priv->tx_sge[i].length = m->m_len;
	}
	priv->tx_wr.wr.num_sge = i;
	priv->tx_wr.wr.wr_id = wr_id;
	priv->tx_wr.remote_qpn = qpn;
	priv->tx_wr.ah = address;

	if (head) {
		priv->tx_wr.mss = 0; /* XXX mb_shinfo(mb)->gso_size; */
		priv->tx_wr.header = head;
		priv->tx_wr.hlen = hlen;
		priv->tx_wr.wr.opcode = IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}

void
ipoib_send(struct ipoib_dev_priv *priv, struct mbuf *mb,
    struct ipoib_ah *address, u32 qpn)
{
	struct ifnet *dev = priv->dev;
	struct ipoib_tx_buf *tx_req;
	int hlen;
	void *phead;

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (ipoib_poll_tx(priv, false))
			;	/* nothing */

	m_adj(mb, sizeof (struct ipoib_pseudoheader));
	if (0 /* XXX segment offload mb_is_gso(mb) */) {
		/* XXX hlen = mb_transport_offset(mb) + tcp_hdrlen(mb); */
		phead = mtod(mb, void *);
		if (mb->m_len < hlen) {
			ipoib_warn(priv, "linear data too small\n");
			if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
			m_freem(mb);
			return;
		}
		m_adj(mb, hlen);
	} else {
		if (unlikely(mb->m_pkthdr.len - IPOIB_ENCAP_LEN > priv->mcast_mtu)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   mb->m_pkthdr.len, priv->mcast_mtu);
			if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
			ipoib_cm_mb_too_long(priv, mb, priv->mcast_mtu);
			return;
		}
		phead = NULL;
		hlen = 0;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       mb->m_pkthdr.len, address, qpn);

	/*
	 * We put the mb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->mb = mb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req, IPOIB_UD_TX_SG))) {
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		if (tx_req->mb)
			m_freem(tx_req->mb);
		return;
	}

	if (mb->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP))
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		dev->if_drv_flags |= IFF_DRV_OACTIVE;
	}

	if (unlikely(post_send(priv,
	    priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn,
	    tx_req, phead, hlen))) {
		ipoib_warn(priv, "post_send failed\n");
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv->ca, tx_req);
		m_freem(mb);
		if (dev->if_drv_flags & IFF_DRV_OACTIVE)
			dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	} else {
		address->last_send = priv->tx_head;
		++priv->tx_head;
	}
}

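/*
 * Destroy dead address handles once the send queue tail has advanced
 * past the last work request that could still reference them.
 */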
static void __ipoib_reap_ah(struct ipoib_dev_priv *priv)
{
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			ib_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);

	__ipoib_reap_ah(priv);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
				   HZ);
}

static void ipoib_ah_dev_cleanup(struct ipoib_dev_priv *priv)
{
	unsigned long begin;

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(priv);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct ipoib_dev_priv *)ctx);
}

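/*
 * Bring the IB side of the interface up: resolve the P_Key index,
 * initialize the QP, fill the receive ring, open connected mode and
 * start the AH reaper.
 */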
int ipoib_ib_dev_open(struct ipoib_dev_priv *priv)
{
	int ret;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
		ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return -1;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	ret = ipoib_init_qp(priv);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(priv);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		ipoib_ib_dev_stop(priv, 1);
		return -1;
	}

	ret = ipoib_cm_dev_open(priv);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		ipoib_ib_dev_stop(priv, 1);
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;
}

static void ipoib_pkey_dev_check_presence(struct ipoib_dev_priv *priv)
{
	u16 pkey_index = 0;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct ipoib_dev_priv *priv)
{

	ipoib_pkey_dev_check_presence(priv);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return 0;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	return ipoib_mcast_start_thread(priv);
}

int ipoib_ib_dev_down(struct ipoib_dev_priv *priv, int flush)
{

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	if_link_state_change(priv->dev, LINK_STATE_DOWN);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_poll_task);
		mutex_unlock(&pkey_mutex);
		if (flush)
			flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(priv, flush);
	ipoib_mcast_dev_flush(priv);

	ipoib_flush_paths(priv);

	return 0;
}

static int recvs_pending(struct ipoib_dev_priv *priv)
{
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].mb)
			++pending;

	return pending;
}

static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
					struct ib_qp *qp,
					enum ib_qp_state new_state)
{
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP (%d)\n", __func__, ret);
		return;
	}

	/* print according to the new state and the previous state */
	if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET) {
		ipoib_dbg(priv, "Failed to modify QP %d->%d, acceptable\n",
			  qp_attr.qp_state, new_state);
	} else {
		ipoib_warn(priv, "Failed to modify QP %d->%d\n",
			   qp_attr.qp_state, new_state);
	}
}

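/*
 * Drain the receive CQ completely during shutdown.  Successful
 * completions are rewritten as flush errors so no packets are passed
 * up the stack after the device has been brought down.
 */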
void ipoib_drain_cq(struct ipoib_dev_priv *priv)
{
	int i, n;

	spin_lock(&priv->drain_lock);
	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if ((priv->ibwc[i].wr_id & IPOIB_OP_RECV) == 0)
				panic("ipoib_drain_cq: Bad wrid 0x%jX\n",
				    (intmax_t)priv->ibwc[i].wr_id);
			if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
				ipoib_cm_handle_rx_wc(priv, priv->ibwc + i);
			else
				ipoib_ib_handle_rx_wc(priv, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);
	spin_unlock(&priv->drain_lock);

	spin_lock(&priv->lock);
	while (ipoib_poll_tx(priv, true))
		;	/* nothing */

	spin_unlock(&priv->lock);
}

int ipoib_ib_dev_stop(struct ipoib_dev_priv *priv, int flush)
{
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	ipoib_cm_dev_stop(priv);

	/*
	 * Move our QP to the error state and then reinitialize it
	 * when all work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(priv)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(priv));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
					 (ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv->ca, tx_req);
				m_freem(tx_req->mb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (rx_req->mb == NULL)
					continue;
				ipoib_dma_unmap_rx(priv, &priv->rx_ring[i]);
				m_freem(rx_req->mb);
				rx_req->mb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(priv);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	if (flush)
		flush_workqueue(ipoib_workqueue);

	ipoib_ah_dev_cleanup(priv);

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{
	struct ifnet *dev = priv->dev;

	priv->ca = ca;
	priv->port = port;
	priv->qp = NULL;

	if (ipoib_transport_dev_init(priv, ca)) {
		printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
		return -ENODEV;
	}

	setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
		    (unsigned long) priv);

	if (dev->if_flags & IFF_UP) {
		if (ipoib_ib_dev_open(priv)) {
			ipoib_transport_dev_cleanup(priv);
			return -ENODEV;
		}
	}

	return 0;
}

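/*
 * Common flush implementation.  LIGHT invalidates paths and flushes
 * multicast state; NORMAL additionally downs the IB device; HEAVY also
 * restarts the QP (stop + open), after re-resolving the P_Key index
 * and skipping the restart entirely when the index is unchanged.
 */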
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level)
{
	struct ipoib_dev_priv *cpriv;
	u16 new_index;

	mutex_lock(&priv->vlan_mutex);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level);

	mutex_unlock(&priv->vlan_mutex);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
			clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
			ipoib_ib_dev_down(priv, 0);
			ipoib_ib_dev_stop(priv, 0);
			if (ipoib_pkey_dev_delay_open(priv))
				return;
		}

		/* restart QP only if P_Key index is changed */
		if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
		    new_index == priv->pkey_index) {
			ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
			return;
		}
		priv->pkey_index = new_index;
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		ipoib_mark_paths_invalid(priv);
		ipoib_mcast_dev_flush(priv);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(priv, 0);

	if (level == IPOIB_FLUSH_HEAVY) {
		ipoib_ib_dev_stop(priv, 0);
		ipoib_ib_dev_open(priv);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here, don't bring it back up if it's not configured up
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(priv);
		ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}

void ipoib_ib_dev_cleanup(struct ipoib_dev_priv *priv)
{

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(priv, 1);
	ipoib_mcast_dev_flush(priv);

	ipoib_ah_dev_cleanup(priv);
	ipoib_transport_dev_cleanup(priv);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);

	ipoib_pkey_dev_check_presence(priv);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		ipoib_open(priv);
	else {
		mutex_lock(&pkey_mutex);
		if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->pkey_poll_task,
					   HZ);
		mutex_unlock(&pkey_mutex);
	}
}

int ipoib_pkey_dev_delay_open(struct ipoib_dev_priv *priv)
{

	/* Look for the interface pkey value in the IB Port P_Key table and */
	/* set the interface pkey assignment flag                           */
	ipoib_pkey_dev_check_presence(priv);

	/* P_Key value not assigned yet - start polling */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		mutex_lock(&pkey_mutex);
		clear_bit(IPOIB_PKEY_STOP, &priv->flags);
		queue_delayed_work(ipoib_workqueue,
				   &priv->pkey_poll_task,
				   HZ);
		mutex_unlock(&pkey_mutex);
		return 1;
	}

	return 0;
}