 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#include <rdma/ib_cache.h>

#include <security/mac/mac_framework.h>

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
    "Enable data path debug tracing if > 0");

static DEFINE_MUTEX(pkey_mutex);
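
/*
 * Allocate an ipoib_ah wrapper and create the underlying ib_ah from
 * the given PD and address attributes.
 */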
struct ipoib_ah *ipoib_create_ah(struct ipoib_dev_priv *priv,
    struct ib_pd *pd, struct ib_ah_attr *attr)
    ah = kmalloc(sizeof *ah, GFP_KERNEL);
    ah->ah = ib_create_ah(pd, attr);
    ipoib_dbg(priv, "Created ah %p\n", ah->ah);
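
/*
 * kref release callback: queue the AH on the dead_ahs list so the
 * reaper task can destroy it once the sends that reference it have
 * completed.
 */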
void ipoib_free_ah(struct kref *kref)
    struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
    struct ipoib_dev_priv *priv = ah->priv;
    spin_lock_irqsave(&priv->lock, flags);
    list_add_tail(&ah->list, &priv->dead_ahs);
    spin_unlock_irqrestore(&priv->lock, flags);

ipoib_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req)
    for (i = 0, m = rx_req->mb; m != NULL; m = m->m_next, i++)
        ib_dma_unmap_single(priv->ca, rx_req->mapping[i], m->m_len,

ipoib_dma_mb(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int length)
    m_adj(mb, -(mb->m_pkthdr.len - length));
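
/*
 * Allocate an mbuf chain large enough to hold 'size' bytes and DMA-map
 * each mbuf in the chain for receive.  On a mapping failure, the
 * mappings made so far are torn down again.
 */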
ipoib_alloc_map_mb(struct ipoib_dev_priv *priv, struct ipoib_rx_buf *rx_req,
    mb = m_getm2(NULL, size, M_NOWAIT, MT_DATA, M_PKTHDR);
    for (i = 0, m = mb; m != NULL; m = m->m_next, i++) {
        m->m_len = (m->m_flags & M_EXT) ? m->m_ext.ext_size :
            ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
        mb->m_pkthdr.len += m->m_len;
        rx_req->mapping[i] = ib_dma_map_single(priv->ca,
            mtod(m, void *), m->m_len, DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca,
            rx_req->mapping[i])))
    for (j = 0, m = mb; j < i; m = m->m_next, j++)
        ib_dma_unmap_single(priv->ca, rx_req->mapping[j], m->m_len,
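
/*
 * Build the scatter/gather list for one receive buffer and post it to
 * the receive queue; on failure the buffer is unmapped and freed.
 */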
static int ipoib_ib_post_receive(struct ipoib_dev_priv *priv, int id)
    struct ipoib_rx_buf *rx_req;
    struct ib_recv_wr *bad_wr;
    rx_req = &priv->rx_ring[id];
    for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
        priv->rx_sge[i].addr = rx_req->mapping[i];
        priv->rx_sge[i].length = m->m_len;
    priv->rx_wr.num_sge = i;
    priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
    ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
        ipoib_dma_unmap_rx(priv, &priv->rx_ring[id]);
        m_freem(priv->rx_ring[id].mb);
        priv->rx_ring[id].mb = NULL;

ipoib_alloc_rx_mb(struct ipoib_dev_priv *priv, int id)
    return ipoib_alloc_map_mb(priv, &priv->rx_ring[id],
        priv->max_ib_mtu + IB_GRH_BYTES);

static int ipoib_ib_post_receives(struct ipoib_dev_priv *priv)
    for (i = 0; i < ipoib_recvq_size; ++i) {
        if (!ipoib_alloc_rx_mb(priv, i)) {
            ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
        if (ipoib_ib_post_receive(priv, i)) {
            ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
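
/*
 * Receive completion handler: validate the work request id, drop
 * flushed or errored completions and packets this interface sent
 * itself, try to replace the receive buffer, strip the GRH, hand the
 * mbuf to the network stack, and repost the buffer.
 */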
ipoib_ib_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
    struct ipoib_rx_buf saverx;
    unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
    struct ifnet *dev = priv->dev;
    struct ipoib_header *eh;
    ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
    if (unlikely(wr_id >= ipoib_recvq_size)) {
        ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
            wr_id, ipoib_recvq_size);
    mb = priv->rx_ring[wr_id].mb;
    if (unlikely(wc->status != IB_WC_SUCCESS)) {
        if (wc->status != IB_WC_WR_FLUSH_ERR) {
            ipoib_warn(priv, "failed recv event "
                "(status=%d, wrid=%d vend_err %x)\n",
                wc->status, wr_id, wc->vendor_err);
        ipoib_dma_unmap_rx(priv, &priv->rx_ring[wr_id]);
        priv->rx_ring[wr_id].mb = NULL;
    /*
     * Drop packets that this interface sent, i.e., multicast packets
     * that the HCA has replicated.
     */
    if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
    memcpy(&saverx, &priv->rx_ring[wr_id], sizeof(saverx));
    /*
     * If we can't allocate a new RX buffer, dump
     * this packet and reuse the old buffer.
     */
    if (unlikely(!ipoib_alloc_rx_mb(priv, wr_id))) {
        memcpy(&priv->rx_ring[wr_id], &saverx, sizeof(saverx));
    ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
        wc->byte_len, wc->slid);
    ipoib_dma_unmap_rx(priv, &saverx);
    ipoib_dma_mb(priv, mb, wc->byte_len);
    dev->if_ibytes += mb->m_pkthdr.len;
    mb->m_pkthdr.rcvif = dev;
    m_adj(mb, sizeof(struct ib_grh) - INFINIBAND_ALEN);
    eh = mtod(mb, struct ipoib_header *);
    bzero(eh->hwaddr, 4); /* Zero the queue pair, only dgid is in grh */
    if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
        mb->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
    dev->if_input(dev, mb);
    if (unlikely(ipoib_ib_post_receive(priv, wr_id)))
        ipoib_warn(priv, "ipoib_ib_post_receive failed "
            "for buf %d\n", wr_id);
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req, int max)
    struct mbuf *mb = tx_req->mb;
    u64 *mapping = tx_req->mapping;
    for (m = mb, p = NULL, i = 0; m != NULL; p = m, m = m->m_next, i++) {
            panic("ipoib_dma_map_tx: First mbuf empty\n");
        p->m_next = m_free(m);
    tx_req->mb = mb = m_defrag(mb, M_DONTWAIT);
    for (m = mb, i = 0; m != NULL; m = m->m_next, i++);
    for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
        mapping[i] = ib_dma_map_single(ca, mtod(m, void *),
            m->m_len, DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(ca, mapping[i]))) {
    for (m = mb, i = 0; i < end; m = m->m_next, i++)
        ib_dma_unmap_single(ca, mapping[i], m->m_len,

void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
    struct mbuf *mb = tx_req->mb;
    u64 *mapping = tx_req->mapping;
    for (m = mb, i = 0; m != NULL; m = m->m_next, i++)
        ib_dma_unmap_single(ca, mapping[i], m->m_len, DMA_TO_DEVICE);
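
/*
 * Send completion handler: unmap the transmit buffer, update the
 * outstanding-send count, and clear IFF_DRV_OACTIVE once the send
 * ring has drained to half full so the interface can transmit again.
 */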
static void ipoib_ib_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
    struct ifnet *dev = priv->dev;
    unsigned int wr_id = wc->wr_id;
    struct ipoib_tx_buf *tx_req;
    ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
    if (unlikely(wr_id >= ipoib_sendq_size)) {
        ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
            wr_id, ipoib_sendq_size);
    tx_req = &priv->tx_ring[wr_id];
    ipoib_dma_unmap_tx(priv->ca, tx_req);
    if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
        (dev->if_drv_flags & IFF_DRV_OACTIVE) &&
        test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
        dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
    if (wc->status != IB_WC_SUCCESS &&
        wc->status != IB_WC_WR_FLUSH_ERR)
        ipoib_warn(priv, "failed send event "
            "(status=%d, wrid=%d vend_err %x)\n",
            wc->status, wr_id, wc->vendor_err);
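
/*
 * Poll up to MAX_SEND_CQE entries from the send CQ, dispatching each
 * completion to the connected-mode or datagram handler.  A nonzero
 * return means the CQ may still hold more completions.
 */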
ipoib_poll_tx(struct ipoib_dev_priv *priv)
    n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
    for (i = 0; i < n; ++i) {
        struct ib_wc *wc = priv->send_wc + i;
        if (wc->wr_id & IPOIB_OP_CM)
            ipoib_cm_handle_tx_wc(priv, wc);
            ipoib_ib_handle_tx_wc(priv, wc);
    return n == MAX_SEND_CQE;

ipoib_poll(struct ipoib_dev_priv *priv)
    n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
    for (i = 0; i < n; i++) {
        struct ib_wc *wc = priv->ibwc + i;
        if ((wc->wr_id & IPOIB_OP_RECV) == 0)
            panic("ipoib_poll: Bad wr_id 0x%jX\n",
                (intmax_t)wc->wr_id);
        if (wc->wr_id & IPOIB_OP_CM)
            ipoib_cm_handle_rx_wc(priv, wc);
            ipoib_ib_handle_rx_wc(priv, wc);
    if (n != IPOIB_NUM_WC)
    if (ib_req_notify_cq(priv->recv_cq,
        IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS))

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
    struct ipoib_dev_priv *priv = dev_ptr;

static void drain_tx_cq(struct ipoib_dev_priv *priv)
    struct ifnet *dev = priv->dev;
    spin_lock(&priv->lock);
    while (ipoib_poll_tx(priv))
    if (dev->if_drv_flags & IFF_DRV_OACTIVE)
        mod_timer(&priv->poll_timer, jiffies + 1);
    spin_unlock(&priv->lock);

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
    struct ipoib_dev_priv *priv = dev_ptr;
    mod_timer(&priv->poll_timer, jiffies);
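
/*
 * Fill in the gather list and UD work request for one mbuf chain and
 * post it on the send queue, using an LSO work request when a
 * segmentation header is supplied.
 */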
post_send(struct ipoib_dev_priv *priv, unsigned int wr_id,
    struct ib_ah *address, u32 qpn, struct ipoib_tx_buf *tx_req, void *head,
    struct ib_send_wr *bad_wr;
    struct mbuf *mb = tx_req->mb;
    u64 *mapping = tx_req->mapping;
    for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
        priv->tx_sge[i].addr = mapping[i];
        priv->tx_sge[i].length = m->m_len;
    priv->tx_wr.num_sge = i;
    priv->tx_wr.wr_id = wr_id;
    priv->tx_wr.wr.ud.remote_qpn = qpn;
    priv->tx_wr.wr.ud.ah = address;
        priv->tx_wr.wr.ud.mss = 0; /* XXX mb_shinfo(mb)->gso_size; */
        priv->tx_wr.wr.ud.header = head;
        priv->tx_wr.wr.ud.hlen = hlen;
        priv->tx_wr.opcode = IB_WR_LSO;
        priv->tx_wr.opcode = IB_WR_SEND;
    return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
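
/*
 * Main transmit path: reap finished sends when too many are
 * outstanding, drop packets that exceed the multicast MTU, DMA-map
 * the mbuf chain, set the checksum-offload flag, stop the queue when
 * the send ring fills, and post the work request.
 */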
ipoib_send(struct ipoib_dev_priv *priv, struct mbuf *mb,
    struct ipoib_ah *address, u32 qpn)
    struct ifnet *dev = priv->dev;
    struct ipoib_tx_buf *tx_req;
    if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
        while (ipoib_poll_tx(priv))
    m_adj(mb, sizeof (struct ipoib_pseudoheader));
    if (0 /* XXX segment offload mb_is_gso(mb) */) {
        /* XXX hlen = mb_transport_offset(mb) + tcp_hdrlen(mb); */
        phead = mtod(mb, void *);
        if (mb->m_len < hlen) {
            ipoib_warn(priv, "linear data too small\n");
    if (unlikely(mb->m_pkthdr.len - IPOIB_ENCAP_LEN > priv->mcast_mtu)) {
        ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
            mb->m_pkthdr.len, priv->mcast_mtu);
        ipoib_cm_mb_too_long(priv, mb, priv->mcast_mtu);
    ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
        mb->m_pkthdr.len, address, qpn);
    /*
     * We put the mb into the tx_ring _before_ we call post_send()
     * because it's entirely possible that the completion handler will
     * run before we execute anything after the post_send().  That
     * means we have to make sure everything is properly recorded and
     * our state is consistent before we call post_send().
     */
    tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
    if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req, IPOIB_UD_TX_SG))) {
    if (mb->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP))
        priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
        priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
    if (++priv->tx_outstanding == ipoib_sendq_size) {
        ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
        if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
            ipoib_warn(priv, "request notify on send CQ failed\n");
        dev->if_drv_flags |= IFF_DRV_OACTIVE;
    if (unlikely(post_send(priv,
        priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn,
        tx_req, phead, hlen))) {
        ipoib_warn(priv, "post_send failed\n");
        --priv->tx_outstanding;
        ipoib_dma_unmap_tx(priv->ca, tx_req);
        if (dev->if_drv_flags & IFF_DRV_OACTIVE)
            dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
        address->last_send = priv->tx_head;
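
/*
 * Walk the dead_ahs list and destroy every address handle whose last
 * posted send has already completed (tx_tail has caught up with
 * last_send).
 */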
static void __ipoib_reap_ah(struct ipoib_dev_priv *priv)
    struct ipoib_ah *ah, *tah;
    LIST_HEAD(remove_list);
    spin_lock_irqsave(&priv->lock, flags);
    list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
        if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
            ib_destroy_ah(ah->ah);
    spin_unlock_irqrestore(&priv->lock, flags);

void ipoib_reap_ah(struct work_struct *work)
    struct ipoib_dev_priv *priv =
        container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
    __ipoib_reap_ah(priv);
    if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,

static void ipoib_ah_dev_cleanup(struct ipoib_dev_priv *priv)
    while (!list_empty(&priv->dead_ahs)) {
        __ipoib_reap_ah(priv);
        if (time_after(jiffies, begin + HZ)) {
            ipoib_warn(priv, "timing out; will leak address handles\n");

static void ipoib_ib_tx_timer_func(unsigned long ctx)
    drain_tx_cq((struct ipoib_dev_priv *)ctx);
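
/*
 * Bring up the IB side of the interface: resolve the P_Key index,
 * initialize the QP, post the receive ring, open connected mode and
 * start the address-handle reaper task.
 */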
int ipoib_ib_dev_open(struct ipoib_dev_priv *priv)
    if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
        ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
        clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
    set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
    ret = ipoib_init_qp(priv);
        ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
    ret = ipoib_ib_post_receives(priv);
        ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
        ipoib_ib_dev_stop(priv, 1);
    ret = ipoib_cm_dev_open(priv);
        ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
        ipoib_ib_dev_stop(priv, 1);
    clear_bit(IPOIB_STOP_REAPER, &priv->flags);
    queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

static void ipoib_pkey_dev_check_presence(struct ipoib_dev_priv *priv)
    if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
        clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

int ipoib_ib_dev_up(struct ipoib_dev_priv *priv)
    ipoib_pkey_dev_check_presence(priv);
    if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
        ipoib_dbg(priv, "PKEY is not assigned.\n");
    set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
    return ipoib_mcast_start_thread(priv);

int ipoib_ib_dev_down(struct ipoib_dev_priv *priv, int flush)
    ipoib_dbg(priv, "downing ib_dev\n");
    clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
    if_link_state_change(priv->dev, LINK_STATE_DOWN);
    /* Shut down the P_Key thread if it is still active */
    if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
        mutex_lock(&pkey_mutex);
        set_bit(IPOIB_PKEY_STOP, &priv->flags);
        cancel_delayed_work(&priv->pkey_poll_task);
        mutex_unlock(&pkey_mutex);
        flush_workqueue(ipoib_workqueue);
    ipoib_mcast_stop_thread(priv, flush);
    ipoib_mcast_dev_flush(priv);
    ipoib_flush_paths(priv);

static int recvs_pending(struct ipoib_dev_priv *priv)
    for (i = 0; i < ipoib_recvq_size; ++i)
        if (priv->rx_ring[i].mb)
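
/*
 * Drain the receive CQ during shutdown.  Successful completions are
 * converted to flush errors so no packets are handed to the stack,
 * and any remaining send completions are reaped afterwards.
 */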
void ipoib_drain_cq(struct ipoib_dev_priv *priv)
    n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
    for (i = 0; i < n; ++i) {
        /*
         * Convert any successful completions to flush
         * errors to avoid passing packets up the
         * stack after bringing the device down.
         */
        if (priv->ibwc[i].status == IB_WC_SUCCESS)
            priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;
        if ((priv->ibwc[i].wr_id & IPOIB_OP_RECV) == 0)
            panic("ipoib_drain_cq: Bad wrid 0x%jX\n",
                (intmax_t)priv->ibwc[i].wr_id);
        if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
            ipoib_cm_handle_rx_wc(priv, priv->ibwc + i);
            ipoib_ib_handle_rx_wc(priv, priv->ibwc + i);
    } while (n == IPOIB_NUM_WC);
    spin_lock(&priv->lock);
    while (ipoib_poll_tx(priv))
    spin_unlock(&priv->lock);
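
/*
 * Tear down the IB side of the interface: stop connected mode, move
 * the QP to the error state, wait (with a timeout) for all posted
 * sends and receives to complete, free anything still outstanding,
 * reset the QP and reap the remaining address handles.
 */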
int ipoib_ib_dev_stop(struct ipoib_dev_priv *priv, int flush)
    struct ib_qp_attr qp_attr;
    struct ipoib_tx_buf *tx_req;
    ipoib_cm_dev_stop(priv);
    /*
     * Move our QP to the error state and then reinitialize it
     * when all work requests have completed or have been flushed.
     */
    qp_attr.qp_state = IB_QPS_ERR;
    if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
        ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
    /* Wait for all sends and receives to complete */
    while (priv->tx_head != priv->tx_tail || recvs_pending(priv)) {
        if (time_after(jiffies, begin + 5 * HZ)) {
            ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                priv->tx_head - priv->tx_tail, recvs_pending(priv));
            /*
             * Assume the HW is wedged and just free up
             * all our pending work requests.
             */
            while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                tx_req = &priv->tx_ring[priv->tx_tail &
                    (ipoib_sendq_size - 1)];
                ipoib_dma_unmap_tx(priv->ca, tx_req);
                --priv->tx_outstanding;
            for (i = 0; i < ipoib_recvq_size; ++i) {
                struct ipoib_rx_buf *rx_req;
                rx_req = &priv->rx_ring[i];
                ipoib_dma_unmap_rx(priv, &priv->rx_ring[i]);
        ipoib_drain_cq(priv);
    ipoib_dbg(priv, "All sends and receives done.\n");
    del_timer_sync(&priv->poll_timer);
    qp_attr.qp_state = IB_QPS_RESET;
    if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
        ipoib_warn(priv, "Failed to modify QP to RESET state\n");
    /* Wait for all AHs to be reaped */
    set_bit(IPOIB_STOP_REAPER, &priv->flags);
    cancel_delayed_work(&priv->ah_reap_task);
    flush_workqueue(ipoib_workqueue);
    ipoib_ah_dev_cleanup(priv);
    ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

int ipoib_ib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
    struct ifnet *dev = priv->dev;
    if (ipoib_transport_dev_init(priv, ca)) {
        printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
    setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
        (unsigned long) priv);
    if (dev->if_flags & IFF_UP) {
        if (ipoib_ib_dev_open(priv)) {
            ipoib_transport_dev_cleanup(priv);
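
/*
 * Common worker for the light/normal/heavy flush levels.  It recurses
 * into any child (VLAN) interfaces first, revalidates the P_Key index
 * on a heavy flush, takes the device down (and fully stops it for a
 * heavy flush), and brings it back up again if it is still
 * administratively up.
 */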
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
    enum ipoib_flush_level level)
    struct ipoib_dev_priv *cpriv;
    mutex_lock(&priv->vlan_mutex);
    /*
     * Flush any child interfaces too -- they might be up even if
     * the parent is down.
     */
    list_for_each_entry(cpriv, &priv->child_intfs, list)
        __ipoib_ib_dev_flush(cpriv, level);
    mutex_unlock(&priv->vlan_mutex);
    if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
        ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
    if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
        ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
    if (level == IPOIB_FLUSH_HEAVY) {
        if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
            clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
            ipoib_ib_dev_down(priv, 0);
            ipoib_ib_dev_stop(priv, 0);
            if (ipoib_pkey_dev_delay_open(priv))
        /* restart the QP only if the P_Key index has changed */
        if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
            new_index == priv->pkey_index) {
            ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
        priv->pkey_index = new_index;
    if (level == IPOIB_FLUSH_LIGHT) {
        ipoib_mark_paths_invalid(priv);
        ipoib_mcast_dev_flush(priv);
    if (level >= IPOIB_FLUSH_NORMAL)
        ipoib_ib_dev_down(priv, 0);
    if (level == IPOIB_FLUSH_HEAVY) {
        ipoib_ib_dev_stop(priv, 0);
        ipoib_ib_dev_open(priv);
    /*
     * The device could have been brought down between the start of the
     * flush and when we get here; don't bring it back up if it's not
     * configured up.
     */
    if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
        if (level >= IPOIB_FLUSH_NORMAL)
            ipoib_ib_dev_up(priv);
        ipoib_mcast_restart_task(&priv->restart_task);

void ipoib_ib_dev_flush_light(struct work_struct *work)
    struct ipoib_dev_priv *priv =
        container_of(work, struct ipoib_dev_priv, flush_light);
    __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);

void ipoib_ib_dev_flush_normal(struct work_struct *work)
    struct ipoib_dev_priv *priv =
        container_of(work, struct ipoib_dev_priv, flush_normal);
    __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
    struct ipoib_dev_priv *priv =
        container_of(work, struct ipoib_dev_priv, flush_heavy);
    __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);

void ipoib_ib_dev_cleanup(struct ipoib_dev_priv *priv)
    ipoib_dbg(priv, "cleaning up ib_dev\n");
    ipoib_mcast_stop_thread(priv, 1);
    ipoib_mcast_dev_flush(priv);
    ipoib_ah_dev_cleanup(priv);
    ipoib_transport_dev_cleanup(priv);

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of a delayed P_Key assignment
 * mechanism.  It uses the same approach implemented for the multicast
 * group join.  The sole goal of this implementation is to quickly address
 * Bug #2507.  This implementation will probably be removed when the P_Key
 * change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
    struct ipoib_dev_priv *priv =
        container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
    ipoib_pkey_dev_check_presence(priv);
    if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
    mutex_lock(&pkey_mutex);
    if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
        queue_delayed_work(ipoib_workqueue,
            &priv->pkey_poll_task,
    mutex_unlock(&pkey_mutex);

int ipoib_pkey_dev_delay_open(struct ipoib_dev_priv *priv)
    /*
     * Look for the interface pkey value in the IB Port P_Key table and
     * set the interface pkey assignment flag.
     */
    ipoib_pkey_dev_check_presence(priv);
    /* P_Key value not assigned yet - start polling */
    if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
        mutex_lock(&pkey_mutex);
        clear_bit(IPOIB_PKEY_STOP, &priv->flags);
        queue_delayed_work(ipoib_workqueue,
            &priv->pkey_poll_task,
    mutex_unlock(&pkey_mutex);