/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <rdma/ib_cache.h>

#include <security/mac/mac_framework.h>

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
    "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);
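
/*
 * Allocate an ipoib_ah wrapper and create the underlying IB address
 * handle on the given protection domain.
 */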
struct ipoib_ah *ipoib_create_ah(struct ipoib_dev_priv *priv,
    struct ib_pd *pd, struct ib_ah_attr *attr)

        ah = kmalloc(sizeof *ah, GFP_KERNEL);

        ah->ah = ib_create_ah(pd, attr);

        ipoib_dbg(priv, "Created ah %p\n", ah->ah);
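
/*
 * kref release callback for an address handle.  Instead of destroying
 * the AH immediately, queue it on the dead_ahs list under the priv
 * lock; the reaper task destroys it once any sends still referencing
 * it have completed.
 */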
void ipoib_free_ah(struct kref *kref)

        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = ah->priv;

        spin_lock_irqsave(&priv->lock, flags);
        list_add_tail(&ah->list, &priv->dead_ahs);
        spin_unlock_irqrestore(&priv->lock, flags);
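
/*
 * Undo the DMA mapping of every segment of a receive mbuf chain.
 */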
ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req)

        for (i = 0, m = rx_req->mb; m != NULL; m = m->m_next, i++)
                ib_dma_unmap_single(priv->ca, rx_req->mapping[i], m->m_len,
                    DMA_FROM_DEVICE);

ipoib_dma_mb(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int length)

        m_adj(mb, -(mb->m_pkthdr.len - length));
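
/*
 * Allocate an mbuf chain big enough for a receive buffer of the given
 * size and DMA-map each segment, recording the mappings in
 * rx_req->mapping[].  On a mapping failure the segments mapped so far
 * are unmapped again.
 */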
ipoib_alloc_map_mb(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req,
    int size)

        mb = m_getm2(NULL, size, M_NOWAIT, MT_DATA, M_PKTHDR);

        for (i = 0, m = mb; m != NULL; m = m->m_next, i++) {
                m->m_len = (m->m_flags & M_EXT) ? m->m_ext.ext_size :
                    ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
                mb->m_pkthdr.len += m->m_len;
                rx_req->mapping[i] = ib_dma_map_single(priv->ca,
                    mtod(m, void *), m->m_len, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca,
                    rx_req->mapping[i])))

        for (j = 0, m = mb; j < i; m = m->m_next, j++)
                ib_dma_unmap_single(priv->ca, rx_req->mapping[j], m->m_len,
                    DMA_FROM_DEVICE);
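
/*
 * Build the scatter list for ring entry 'id' from its mbuf chain and
 * post it to the receive queue; on failure, unmap and free the buffer.
 */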
static int ipoib_ib_post_receive(struct ipoib_dev_priv *priv, int id)

        struct ipoib_rx_buf *rx_req;
        struct ib_recv_wr *bad_wr;

        rx_req = &priv->rx_ring[id];
        for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
                priv->rx_sge[i].addr = rx_req->mapping[i];
                priv->rx_sge[i].length = m->m_len;
        }
        priv->rx_wr.num_sge = i;
        priv->rx_wr.wr_id = id | IPOIB_OP_RECV;

        ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                ipoib_dma_unmap_rx(priv, &priv->rx_ring[id]);
                m_freem(priv->rx_ring[id].mb);
                priv->rx_ring[id].mb = NULL;
        }

ipoib_alloc_rx_mb(struct ipoib_dev_priv *priv, int id)

        return ipoib_alloc_map_mb(priv, &priv->rx_ring[id],
            priv->max_ib_mtu + IB_GRH_BYTES);

static int ipoib_ib_post_receives(struct ipoib_dev_priv *priv)

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_alloc_rx_mb(priv, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);

                if (ipoib_ib_post_receive(priv, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
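
/*
 * Handle one receive completion: validate the work request id, refill
 * the ring slot, trim the mbuf chain to the received length, strip the
 * GRH, and pass the packet to the network stack.
 */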
ipoib_ib_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)

        struct ipoib_rx_buf saverx;
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct ifnet *dev = priv->dev;
        struct ipoib_header *eh;

        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
            wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
                    wr_id, ipoib_recvq_size);

        mb = priv->rx_ring[wr_id].mb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR) {
                        ipoib_warn(priv, "failed recv event "
                            "(status=%d, wrid=%d vend_err %x)\n",
                            wc->status, wr_id, wc->vendor_err);

                ipoib_dma_unmap_rx(priv, &priv->rx_ring[wr_id]);

                priv->rx_ring[wr_id].mb = NULL;

        /*
         * Drop packets that this interface sent, i.e. multicast packets
         * that the HCA has replicated.
         */
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)

        memcpy(&saverx, &priv->rx_ring[wr_id], sizeof(saverx));
        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
        if (unlikely(!ipoib_alloc_rx_mb(priv, wr_id))) {
                memcpy(&priv->rx_ring[wr_id], &saverx, sizeof(saverx));

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
            wc->byte_len, wc->slid);

        ipoib_dma_unmap_rx(priv, &saverx);
        ipoib_dma_mb(priv, mb, wc->byte_len);

        dev->if_ibytes += mb->m_pkthdr.len;
        mb->m_pkthdr.rcvif = dev;
        m_adj(mb, sizeof(struct ib_grh) - INFINIBAND_ALEN);
        eh = mtod(mb, struct ipoib_header *);
        bzero(eh->hwaddr, 4);   /* Zero the queue pair; only the dgid is in the GRH. */

        if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
                mb->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

        dev->if_input(dev, mb);

        if (unlikely(ipoib_ib_post_receive(priv, wr_id)))
                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                    "for buf %d\n", wr_id);
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req, int max)

        struct mbuf *mb = tx_req->mb;
        u64 *mapping = tx_req->mapping;

        for (m = mb, p = NULL, i = 0; m != NULL; p = m, m = m->m_next, i++) {

                        panic("ipoib_dma_map_tx: First mbuf empty\n");
                p->m_next = m_free(m);

                tx_req->mb = mb = m_defrag(mb, M_DONTWAIT);

                for (m = mb, i = 0; m != NULL; m = m->m_next, i++);

        for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
                mapping[i] = ib_dma_map_single(ca, mtod(m, void *),
                    m->m_len, DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[i]))) {

        for (m = mb, i = 0; i < end; m = m->m_next, i++)
                ib_dma_unmap_single(ca, mapping[i], m->m_len,
                    DMA_TO_DEVICE);

void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)

        struct mbuf *mb = tx_req->mb;
        u64 *mapping = tx_req->mapping;

        for (m = mb, i = 0; m != NULL; m = m->m_next, i++)
                ib_dma_unmap_single(ca, mapping[i], m->m_len, DMA_TO_DEVICE);
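
/*
 * Handle one send completion: unmap the buffer, account the
 * transmitted bytes, and reopen the send queue once a previously full
 * TX ring has drained to half capacity.
 */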
static void ipoib_ib_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)

        struct ifnet *dev = priv->dev;
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;

        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
            wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
                    wr_id, ipoib_sendq_size);

        tx_req = &priv->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv->ca, tx_req);

        dev->if_obytes += tx_req->mb->m_pkthdr.len;

        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            (dev->if_drv_flags & IFF_DRV_OACTIVE) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR)
                ipoib_warn(priv, "failed send event "
                    "(status=%d, wrid=%d vend_err %x)\n",
                    wc->status, wr_id, wc->vendor_err);
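
/*
 * Poll up to MAX_SEND_CQE send completions and dispatch each to the
 * connected-mode or datagram handler; returns true if the CQ may still
 * hold more completions.
 */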
ipoib_poll_tx(struct ipoib_dev_priv *priv)

        n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
        for (i = 0; i < n; ++i) {
                struct ib_wc *wc = priv->send_wc + i;
                if (wc->wr_id & IPOIB_OP_CM)
                        ipoib_cm_handle_tx_wc(priv, wc);
                else
                        ipoib_ib_handle_tx_wc(priv, wc);
        }

        return n == MAX_SEND_CQE;
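
/*
 * Poll receive completions in batches of IPOIB_NUM_WC until the CQ is
 * empty, then rearm the completion notification, polling again if
 * events were missed while rearming.
 */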
ipoib_poll(struct ipoib_dev_priv *priv)

        n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);

        for (i = 0; i < n; i++) {
                struct ib_wc *wc = priv->ibwc + i;

                if ((wc->wr_id & IPOIB_OP_RECV) == 0)
                        panic("ipoib_poll: Bad wr_id 0x%jX\n",
                            (intmax_t)wc->wr_id);
                if (wc->wr_id & IPOIB_OP_CM)
                        ipoib_cm_handle_rx_wc(priv, wc);
                else
                        ipoib_ib_handle_rx_wc(priv, wc);
        }

        if (n != IPOIB_NUM_WC)

        if (ib_req_notify_cq(priv->recv_cq,
            IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS))

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)

        struct ipoib_dev_priv *priv = dev_ptr;
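
/*
 * Drain the send CQ from timer context; if the interface is still
 * flow-controlled afterwards, reschedule the poll timer for the next
 * tick.
 */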
static void drain_tx_cq(struct ipoib_dev_priv *priv)

        struct ifnet *dev = priv->dev;

        spin_lock(&priv->lock);
        while (ipoib_poll_tx(priv))
                ; /* nothing */

        if (dev->if_drv_flags & IFF_DRV_OACTIVE)
                mod_timer(&priv->poll_timer, jiffies + 1);

        spin_unlock(&priv->lock);

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)

        struct ipoib_dev_priv *priv = dev_ptr;

        mod_timer(&priv->poll_timer, jiffies);
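
/*
 * Fill in the gather list and UD work request for one packet and post
 * it to the send queue, using an LSO work request when a separate
 * header is supplied.
 */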
post_send(struct ipoib_dev_priv *priv, unsigned int wr_id,
    struct ib_ah *address, u32 qpn, struct ipoib_tx_buf *tx_req, void *head,
    int hlen)

        struct ib_send_wr *bad_wr;
        struct mbuf *mb = tx_req->mb;
        u64 *mapping = tx_req->mapping;

        for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
                priv->tx_sge[i].addr = mapping[i];
                priv->tx_sge[i].length = m->m_len;
        }
        priv->tx_wr.num_sge = i;
        priv->tx_wr.wr_id = wr_id;
        priv->tx_wr.wr.ud.remote_qpn = qpn;
        priv->tx_wr.wr.ud.ah = address;

        if (head) {
                priv->tx_wr.wr.ud.mss = 0; /* XXX mb_shinfo(mb)->gso_size; */
                priv->tx_wr.wr.ud.header = head;
                priv->tx_wr.wr.ud.hlen = hlen;
                priv->tx_wr.opcode = IB_WR_LSO;
        } else
                priv->tx_wr.opcode = IB_WR_SEND;

        return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
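
/*
 * Transmit one mbuf chain to the destination described by the address
 * handle and queue pair number.
 */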
ipoib_send(struct ipoib_dev_priv *priv, struct mbuf *mb,
    struct ipoib_ah *address, u32 qpn)

        struct ifnet *dev = priv->dev;
        struct ipoib_tx_buf *tx_req;

        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
                while (ipoib_poll_tx(priv))
                        ; /* nothing */

        m_adj(mb, sizeof (struct ipoib_pseudoheader));
        if (0 /* XXX segment offload mb_is_gso(mb) */) {
                /* XXX hlen = mb_transport_offset(mb) + tcp_hdrlen(mb); */
                phead = mtod(mb, void *);
                if (mb->m_len < hlen) {
                        ipoib_warn(priv, "linear data too small\n");

        if (unlikely(mb->m_pkthdr.len - IPOIB_ENCAP_LEN > priv->mcast_mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                    mb->m_pkthdr.len, priv->mcast_mtu);

                ipoib_cm_mb_too_long(priv, mb, priv->mcast_mtu);

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
            mb->m_pkthdr.len, address, qpn);

        /*
         * We put the mb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];

        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req, IPOIB_UD_TX_SG))) {

        if (mb->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP))
                priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

        if (++priv->tx_outstanding == ipoib_sendq_size) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                        ipoib_warn(priv, "request notify on send CQ failed\n");
                dev->if_drv_flags |= IFF_DRV_OACTIVE;
        }

        if (unlikely(post_send(priv,
            priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn,
            tx_req, phead, hlen))) {
                ipoib_warn(priv, "post_send failed\n");

                --priv->tx_outstanding;
                ipoib_dma_unmap_tx(priv->ca, tx_req);

                if (dev->if_drv_flags & IFF_DRV_OACTIVE)
                        dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

                address->last_send = priv->tx_head;
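
/*
 * Destroy dead address handles whose last use has completed, i.e.
 * whose last_send is no longer ahead of tx_tail.
 */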
static void __ipoib_reap_ah(struct ipoib_dev_priv *priv)

        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {

                        ib_destroy_ah(ah->ah);

        spin_unlock_irqrestore(&priv->lock, flags);

void ipoib_reap_ah(struct work_struct *work)

        struct ipoib_dev_priv *priv =
            container_of(work, struct ipoib_dev_priv, ah_reap_task.work);

        __ipoib_reap_ah(priv);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,

static void ipoib_ah_dev_cleanup(struct ipoib_dev_priv *priv)

        while (!list_empty(&priv->dead_ahs)) {
                __ipoib_reap_ah(priv);

                if (time_after(jiffies, begin + HZ)) {
                        ipoib_warn(priv, "timing out; will leak address handles\n");

static void ipoib_ib_tx_timer_func(unsigned long ctx)

        drain_tx_cq((struct ipoib_dev_priv *)ctx);

int ipoib_ib_dev_open(struct ipoib_dev_priv *priv)

        if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
                ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

        set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

        ret = ipoib_init_qp(priv);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);

        ret = ipoib_ib_post_receives(priv);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                ipoib_ib_dev_stop(priv, 1);

        ret = ipoib_cm_dev_open(priv);
        if (ret) {
                ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
                ipoib_ib_dev_stop(priv, 1);

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

static void ipoib_pkey_dev_check_presence(struct ipoib_dev_priv *priv)

        if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

int ipoib_ib_dev_up(struct ipoib_dev_priv *priv)

        ipoib_pkey_dev_check_presence(priv);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(priv);

int ipoib_ib_dev_down(struct ipoib_dev_priv *priv, int flush)

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        if_link_state_change(priv->dev, LINK_STATE_DOWN);

        /* Shut down the P_Key polling thread if it is still active */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                set_bit(IPOIB_PKEY_STOP, &priv->flags);
                cancel_delayed_work(&priv->pkey_poll_task);
                mutex_unlock(&pkey_mutex);
                if (flush)
                        flush_workqueue(ipoib_workqueue);
        }

        ipoib_mcast_stop_thread(priv, flush);
        ipoib_mcast_dev_flush(priv);

        ipoib_flush_paths(priv);

static int recvs_pending(struct ipoib_dev_priv *priv)

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].mb)
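
/*
 * Drain both CQs while the device is going down, demoting successful
 * receive completions to flush errors so no packets are passed up the
 * stack.
 */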
void ipoib_drain_cq(struct ipoib_dev_priv *priv)

        do {
                n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /*
                         * Convert any successful completions to flush
                         * errors to avoid passing packets up the
                         * stack after bringing the device down.
                         */
                        if (priv->ibwc[i].status == IB_WC_SUCCESS)
                                priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

                        if ((priv->ibwc[i].wr_id & IPOIB_OP_RECV) == 0)
                                panic("ipoib_drain_cq: Bad wrid 0x%jX\n",
                                    (intmax_t)priv->ibwc[i].wr_id);
                        if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                ipoib_cm_handle_rx_wc(priv, priv->ibwc + i);
                        else
                                ipoib_ib_handle_rx_wc(priv, priv->ibwc + i);
                }
        } while (n == IPOIB_NUM_WC);

        spin_lock(&priv->lock);
        while (ipoib_poll_tx(priv))
                ; /* nothing */

        spin_unlock(&priv->lock);
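
/*
 * Stop the IB side of the interface: move the QP to the error state,
 * wait (with a bounded timeout) for all posted work requests to
 * complete, reclaim any buffers the hardware failed to return, and
 * finally reset the QP.
 */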
int ipoib_ib_dev_stop(struct ipoib_dev_priv *priv, int flush)

        struct ib_qp_attr qp_attr;
        struct ipoib_tx_buf *tx_req;

        ipoib_cm_dev_stop(priv);

        /*
         * Move our QP to the error state and then reinitialize when
         * all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */

        while (priv->tx_head != priv->tx_tail || recvs_pending(priv)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                            priv->tx_head - priv->tx_tail, recvs_pending(priv));

                        /*
                         * assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                    (ipoib_sendq_size - 1)];
                                ipoib_dma_unmap_tx(priv->ca, tx_req);

                                --priv->tx_outstanding;

                        for (i = 0; i < ipoib_recvq_size; ++i) {
                                struct ipoib_rx_buf *rx_req;

                                rx_req = &priv->rx_ring[i];

                                ipoib_dma_unmap_rx(priv, &priv->rx_ring[i]);

                ipoib_drain_cq(priv);

        ipoib_dbg(priv, "All sends and receives done.\n");

        del_timer_sync(&priv->poll_timer);
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
        if (flush)
                flush_workqueue(ipoib_workqueue);

        ipoib_ah_dev_cleanup(priv);

        ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

int ipoib_ib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)

        struct ifnet *dev = priv->dev;

        if (ipoib_transport_dev_init(priv, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);

        setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
            (unsigned long) priv);

        if (dev->if_flags & IFF_UP) {
                if (ipoib_ib_dev_open(priv)) {
                        ipoib_transport_dev_cleanup(priv);
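
/*
 * Common implementation of the light/normal/heavy flush tasks.  A
 * light flush only invalidates paths and multicast state, a normal
 * flush also takes the device down, and a heavy flush additionally
 * restarts the QP after rechecking the P_Key index.
 */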
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
    enum ipoib_flush_level level)

        struct ipoib_dev_priv *cpriv;

        mutex_lock(&priv->vlan_mutex);

        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                __ipoib_ib_dev_flush(cpriv, level);

        mutex_unlock(&priv->vlan_mutex);

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");

        if (level == IPOIB_FLUSH_HEAVY) {
                if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
                        clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
                        ipoib_ib_dev_down(priv, 0);
                        ipoib_ib_dev_stop(priv, 0);
                        if (ipoib_pkey_dev_delay_open(priv))

                /* restart the QP only if the P_Key index has changed */
                if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
                    new_index == priv->pkey_index) {
                        ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");

                priv->pkey_index = new_index;

        if (level == IPOIB_FLUSH_LIGHT) {
                ipoib_mark_paths_invalid(priv);
                ipoib_mcast_dev_flush(priv);
        }

        if (level >= IPOIB_FLUSH_NORMAL)
                ipoib_ib_dev_down(priv, 0);

        if (level == IPOIB_FLUSH_HEAVY) {
                ipoib_ib_dev_stop(priv, 0);
                ipoib_ib_dev_open(priv);
        }

        /*
         * The device could have been brought down between the start and when
         * we get here; don't bring it back up if it's not configured up.
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                if (level >= IPOIB_FLUSH_NORMAL)
                        ipoib_ib_dev_up(priv);
                ipoib_mcast_restart_task(&priv->restart_task);
        }

void ipoib_ib_dev_flush_light(struct work_struct *work)

        struct ipoib_dev_priv *priv =
            container_of(work, struct ipoib_dev_priv, flush_light);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);

void ipoib_ib_dev_flush_normal(struct work_struct *work)

        struct ipoib_dev_priv *priv =
            container_of(work, struct ipoib_dev_priv, flush_normal);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);

void ipoib_ib_dev_flush_heavy(struct work_struct *work)

        struct ipoib_dev_priv *priv =
            container_of(work, struct ipoib_dev_priv, flush_heavy);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);

void ipoib_ib_dev_cleanup(struct ipoib_dev_priv *priv)

        ipoib_dbg(priv, "cleaning up ib_dev\n");

        ipoib_mcast_stop_thread(priv, 1);
        ipoib_mcast_dev_flush(priv);

        ipoib_ah_dev_cleanup(priv);
        ipoib_transport_dev_cleanup(priv);

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)

        struct ipoib_dev_priv *priv =
            container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);

        ipoib_pkey_dev_check_presence(priv);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))

        else {
                mutex_lock(&pkey_mutex);
                if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                            &priv->pkey_poll_task,

                mutex_unlock(&pkey_mutex);

int ipoib_pkey_dev_delay_open(struct ipoib_dev_priv *priv)

        /*
         * Look for the interface pkey value in the IB Port P_Key table
         * and set the interface pkey assignment flag.
         */
        ipoib_pkey_dev_check_presence(priv);

        /* P_Key value not assigned yet - start polling */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                clear_bit(IPOIB_PKEY_STOP, &priv->flags);
                queue_delayed_work(ipoib_workqueue,
                    &priv->pkey_poll_task,

                mutex_unlock(&pkey_mutex);