/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipoib.h"

static int ipoib_resolvemulti(struct ifnet *, struct sockaddr **,
	struct sockaddr *);
#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */
#include <linux/if_vlan.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
int ipoib_sendq_size = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level = 1;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
struct ipoib_path_iter {
	struct ipoib_dev_priv *priv;
	struct ipoib_path path;
};
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
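
/*
 * Illustrative sketch (not part of the driver): an IPoIB link-layer
 * address is INFINIBAND_ALEN (20) bytes, a 4-byte queue pair number
 * followed by a 16-byte GID.  ipv4_bcast_addr above is the template for
 * the broadcast MGID; bytes 8 and 9 (GID bytes 4 and 5) carry the
 * partition key, which ipoib_add_port() and ipoib_config_vlan() patch
 * in per interface.  The helper name below is made up for illustration
 * and is not called anywhere.
 */
static inline void
ipoib_example_set_bcast_pkey(uint8_t bcast[INFINIBAND_ALEN], uint16_t pkey)
{

	/* bcast[0..3] = QPN, bcast[4..19] = MGID. */
	bcast[8] = pkey >> 8;		/* P_Key, high byte */
	bcast[9] = pkey & 0xff;		/* P_Key, low byte */
}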
struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
static void ipoib_start(struct ifnet *dev);
static int ipoib_output(struct ifnet *ifp, struct mbuf *m,
	    const struct sockaddr *dst, struct route *ro);
static int ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void ipoib_input(struct ifnet *ifp, struct mbuf *m);
#define	IPOIB_MTAP(_ifp, _m)					\
do {								\
	if (bpf_peers_present((_ifp)->if_bpf)) {		\
		ipoib_mtap_mb((_ifp), (_m));			\
	}							\
} while (0)
/*
 * This is for clients that have an ipoib_header in the mbuf.
 */
static void
ipoib_mtap_mb(struct ifnet *ifp, struct mbuf *mb)
{
	struct ipoib_header *ih;
	struct ether_header eh;

	ih = mtod(mb, struct ipoib_header *);
	eh.ether_type = ih->proto;
	bcopy(ih->hwaddr, &eh.ether_dhost, ETHER_ADDR_LEN);
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	mb->m_data += sizeof(struct ipoib_header);
	mb->m_len -= sizeof(struct ipoib_header);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
	mb->m_data -= sizeof(struct ipoib_header);
	mb->m_len += sizeof(struct ipoib_header);
}
static void
ipoib_mtap_proto(struct ifnet *ifp, struct mbuf *mb, uint16_t proto)
{
	struct ether_header eh;

	eh.ether_type = proto;
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	bzero(&eh.ether_dhost, ETHER_ADDR_LEN);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
}
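
/*
 * Usage sketch (hypothetical caller, not in the driver): because the
 * interface is bpfattach()ed with DLT_EN10MB (see ipoib_intf_alloc()
 * below), frames handed to BPF get a synthetic Ethernet header.  A
 * caller holding a raw payload mbuf with no ipoib_header in front
 * would tap it with an explicit EtherType:
 *
 *	ipoib_mtap_proto(ifp, mb, htons(ETHERTYPE_ARP));
 *
 * while callers whose mbuf still starts with an ipoib_header use
 * IPOIB_MTAP(), which strips and restores that header around the tap.
 */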
static struct ib_client ipoib_client = {
	.add	= ipoib_add_one,
	.remove	= ipoib_remove_one
};
int
ipoib_open(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(priv))
		return 0;

	if (ipoib_ib_dev_open(priv))
		goto err_disable;

	if (ipoib_ib_dev_up(priv))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ipoib_open(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}
	dev->if_drv_flags |= IFF_DRV_RUNNING;
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	return 0;

err_stop:
	ipoib_ib_dev_stop(priv, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
	return -EINVAL;
}
static void
ipoib_init(void *arg)
{
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;

	priv = arg;
	dev = priv->dev;
	if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
		ipoib_open(priv);
	queue_work(ipoib_workqueue, &priv->flush_light);
}
int
ipoib_stop(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ipoib_ib_dev_down(priv, 0);
	ipoib_ib_dev_stop(priv, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ipoib_stop(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}
static int
ipoib_change_mtu(struct ipoib_dev_priv *priv, int new_mtu)
{
	struct ifnet *dev = priv->dev;

	/* dev->if_mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(priv)) {
		if (new_mtu > IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)))
			return -EINVAL;
		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
			    priv->mcast_mtu);
		dev->if_mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->if_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}
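
/*
 * Worked example (assuming IPOIB_UD_MTU(ib_mtu) is ib_mtu minus the
 * 4-byte IPOIB_ENCAP_LEN header, as in ipoib.h): on a port whose active
 * IB MTU is 2048, the largest datagram-mode if_mtu is 2048 - 4 = 2044.
 * Anything larger is only reachable in connected mode, whose ceiling
 * comes from IPOIB_CM_MTU(ipoib_cm_max_mtu()).
 */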
static int
ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ipoib_dev_priv *priv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				error = -ipoib_open(priv);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ipoib_stop(priv);

		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			queue_work(ipoib_workqueue, &priv->restart_task);

		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
			arp_ifinit(ifp, ifa);

			ifp->if_init(ifp->if_softc);

		sa = (struct sockaddr *) & ifr->ifr_data;
		bcopy(IF_LLADDR(ifp),
		    (caddr_t) sa->sa_data, INFINIBAND_ALEN);

		/*
		 * Set the interface MTU.
		 */
		error = -ipoib_change_mtu(priv, ifr->ifr_mtu);
static struct ipoib_path *
__path_find(struct ipoib_dev_priv *priv, void *gid)
{
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
		    sizeof (union ib_gid));
static int
__path_add(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
		    sizeof (union ib_gid));

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}
void
ipoib_path_free(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{

	_IF_DRAIN(&path->queue);

	if (path->ah)
		ipoib_put_ah(path->ah);
	if (ipoib_cm_get(path))
		ipoib_cm_destroy_tx(ipoib_cm_get(path));

	kfree(path);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *
ipoib_path_iter_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);

	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
int
ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = iter->priv;
	struct rb_node *n;
	struct ipoib_path *path;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
		    sizeof (union ib_gid)) < 0) {

	spin_unlock_irq(&priv->lock);
void
ipoib_path_iter_read(struct ipoib_path_iter *iter, struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
void
ipoib_mark_paths_invalid(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %16D invalid\n",
		    be16_to_cpu(path->pathrec.dlid),
		    path->pathrec.dgid.raw, ":");
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}
void
ipoib_flush_paths(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		wait_for_completion(&path->done);
		ipoib_path_free(priv, path);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
static void
path_rec_completion(int status, struct ib_sa_path_rec *pathrec, void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct ipoib_dev_priv *priv = path->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ifqueue mbqueue;
	struct mbuf *mb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %16D\n",
		    be16_to_cpu(pathrec->dlid), pathrec->dgid.raw, ":");
	else
		ipoib_dbg(priv, "PathRec status %d for GID %16D\n",
		    status, path->pathrec.dgid.raw, ":");

	bzero(&mbqueue, sizeof(mbqueue));

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(priv, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

		path->pathrec = *pathrec;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
		    ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		for (;;) {
			_IF_DEQUEUE(&path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}

#ifdef CONFIG_INFINIBAND_IPOIB_CM
		if (ipoib_cm_enabled(priv, path->hwaddr) && !ipoib_cm_get(path))
			ipoib_cm_set(path, ipoib_cm_create_tx(priv, path));
#endif

	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	for (;;) {
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "dev_queue_xmit failed "
			    "to requeue packet\n");
	}
}
static struct ipoib_path *
path_rec_create(struct ipoib_dev_priv *priv, uint8_t *hwaddr)
{
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->priv = priv;

	bzero(&path->queue, sizeof(path->queue));

#ifdef CONFIG_INFINIBAND_IPOIB_CM
	memcpy(&path->hwaddr, hwaddr, INFINIBAND_ALEN);
#endif
	memcpy(path->pathrec.dgid.raw, &hwaddr[4], sizeof (union ib_gid));
	path->pathrec.sgid = priv->local_gid;
	path->pathrec.pkey = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}
static int
path_rec_start(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct ifnet *dev = priv->dev;
	ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU;
	struct ib_sa_path_rec p_rec;

	p_rec = path->pathrec;
	p_rec.mtu_selector = IB_SA_GT;

	switch (roundup_pow_of_two(dev->if_mtu + IPOIB_ENCAP_LEN)) {
		p_rec.mtu = IB_MTU_256;
		p_rec.mtu = IB_MTU_512;
		p_rec.mtu = IB_MTU_1024;
		p_rec.mtu = IB_MTU_2048;
		/* Wildcard everything */
		p_rec.mtu_selector = 0;
	}

	ipoib_dbg(priv, "Start path record lookup for %16D MTU > %d\n",
	    path->pathrec.dgid.raw, ":",
	    comp_mask ? ib_mtu_enum_to_int(p_rec.mtu) : 0);

	init_completion(&path->done);

	path->query_id =
	    ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
		IB_SA_PATH_REC_DGID |
		IB_SA_PATH_REC_SGID |
		IB_SA_PATH_REC_NUMB_PATH |
		IB_SA_PATH_REC_TRAFFIC_CLASS |

	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}
static void
ipoib_unicast_send(struct mbuf *mb, struct ipoib_dev_priv *priv, struct ipoib_header *eh)
{
	struct ipoib_path *path;

	path = __path_find(priv, eh->hwaddr + 4);
	if (!path || !path->valid) {
			path = path_rec_create(priv, eh->hwaddr);

			_IF_ENQUEUE(&path->queue, mb);
			if (!path->query && path_rec_start(priv, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				ipoib_path_free(priv, path);
				__path_add(priv, path);
			++priv->dev->if_oerrors;

	if (ipoib_cm_get(path) && ipoib_cm_up(path)) {
		ipoib_cm_send(priv, mb, ipoib_cm_get(path));
	} else if (path->ah) {
		ipoib_send(priv, mb, path->ah, IPOIB_QPN(eh->hwaddr));
	} else if ((path->query || !path_rec_start(priv, path)) &&
	    path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) {
		_IF_ENQUEUE(&path->queue, mb);
	} else {
		++priv->dev->if_oerrors;
		m_freem(mb);
	}
}
static void
ipoib_send_one(struct ipoib_dev_priv *priv, struct mbuf *mb)
{
	struct ipoib_header *eh;

	eh = mtod(mb, struct ipoib_header *);
	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		/* Add in the P_Key for multicast */
		eh->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		eh->hwaddr[9] = priv->pkey & 0xff;
		ipoib_mcast_send(priv, eh->hwaddr + 4, mb);
	} else
		ipoib_unicast_send(mb, priv, eh);
}
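
/*
 * Illustrative sketch (not called by the driver): ipoib_send_one()
 * treats eh->hwaddr as a 4-byte QPN followed by a 16-byte GID.
 * Multicast frames get the P_Key written into GID bytes 4 and 5
 * (hwaddr[8] and hwaddr[9]) before the group GID at hwaddr + 4 is
 * handed to ipoib_mcast_send(); unicast frames recover the destination
 * QPN roughly as shown below.  The driver's own IPOIB_QPN() macro is
 * the real accessor; this helper only illustrates the byte layout.
 */
static inline uint32_t
ipoib_example_qpn(const uint8_t *hwaddr)
{

	/* QPNs are 24 bits, carried in the first 4 bytes of the address. */
	return (((uint32_t)hwaddr[1] << 16) | ((uint32_t)hwaddr[2] << 8) |
	    (uint32_t)hwaddr[3]);
}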
static void
_ipoib_start(struct ifnet *dev, struct ipoib_dev_priv *priv)
{
	struct mbuf *mb;

	if ((dev->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	spin_lock(&priv->lock);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		ipoib_send_one(priv, mb);
	}
	spin_unlock(&priv->lock);
}
static void
ipoib_start(struct ifnet *dev)
{
	_ipoib_start(dev, dev->if_softc);
}
static void
ipoib_vlan_start(struct ifnet *dev)
{
	struct ipoib_dev_priv *priv;
	struct mbuf *mb;

	priv = VLAN_COOKIE(dev);
	if (priv != NULL)
		return _ipoib_start(dev, priv);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd)) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		m_freem(mb);
	}
}
int
ipoib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{

	/* Allocate RX/TX "rings" to hold queued mbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
	    GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		    ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		    ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(priv, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}
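
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * the ring sizes allocated above come from ipoib_sendq_size and
 * ipoib_recvq_size, which ipoib_init_module() rounds up to powers of
 * two.  That lets a monotonically increasing head/tail counter be
 * turned into a slot index with a mask instead of a division:
 */
static inline unsigned int
ipoib_example_ring_slot(unsigned int counter, unsigned int ring_size)
{

	/*
	 * Equivalent to counter % ring_size, but only when ring_size is
	 * a power of two.
	 */
	return (counter & (ring_size - 1));
}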
static void
ipoib_detach(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev;

	dev = priv->dev;
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		bpfdetach(dev);
		if_detach(dev);
		if_free(dev);
	} else
		VLAN_SETCOOKIE(priv->dev, NULL);

	free(priv, M_TEMP);
}
void
ipoib_dev_cleanup(struct ipoib_dev_priv *priv)
{
	struct ipoib_dev_priv *cpriv, *tcpriv;

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		ipoib_dev_cleanup(cpriv);
		ipoib_detach(cpriv);
	}

	ipoib_ib_dev_cleanup(priv);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}
static volatile int ipoib_unit;

static struct ipoib_dev_priv *
ipoib_priv_alloc(void)
{
	struct ipoib_dev_priv *priv;

	priv = malloc(sizeof(struct ipoib_dev_priv), M_TEMP, M_ZERO|M_WAITOK);
	spin_lock_init(&priv->lock);
	mutex_init(&priv->vlan_mutex);
	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);
	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	memcpy(priv->broadcastaddr, ipv4_bcast_addr, INFINIBAND_ALEN);

	return priv;
}
struct ipoib_dev_priv *
ipoib_intf_alloc(const char *name)
{
	struct ipoib_dev_priv *priv;
	struct sockaddr_dl *sdl;
	struct ifnet *dev;

	priv = ipoib_priv_alloc();
	dev = priv->dev = if_alloc(IFT_INFINIBAND);

	dev->if_softc = priv;
	if_initname(dev, name, atomic_fetchadd_int(&ipoib_unit, 1));
	dev->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	dev->if_addrlen = INFINIBAND_ALEN;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	dev->if_init = ipoib_init;
	dev->if_ioctl = ipoib_ioctl;
	dev->if_start = ipoib_start;
	dev->if_output = ipoib_output;
	dev->if_input = ipoib_input;
	dev->if_resolvemulti = ipoib_resolvemulti;
	if_initbaudrate(dev, IF_Gbps(10));
	dev->if_broadcastaddr = priv->broadcastaddr;
	dev->if_snd.ifq_maxlen = ipoib_sendq_size * 2;
	sdl = (struct sockaddr_dl *)dev->if_addr->ifa_addr;
	sdl->sdl_type = IFT_INFINIBAND;
	sdl->sdl_alen = dev->if_addrlen;

	if_link_state_change(dev, LINK_STATE_DOWN);
	bpfattach(dev, DLT_EN10MB, ETHER_HDR_LEN);

	return dev->if_softc;
}
int
ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		    hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		    hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;
	kfree(device_attr);

	priv->dev->if_hwassist = 0;
	priv->dev->if_capabilities = 0;

#ifndef CONFIG_INFINIBAND_IPOIB_CM
	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
		priv->dev->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}
#endif

	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) {
		priv->dev->if_capabilities |= IFCAP_TSO4;
		priv->dev->if_hwassist |= CSUM_TSO;
	}

	priv->dev->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_LINKSTATE;
	priv->dev->if_capenable = priv->dev->if_capabilities;

	return 0;
}
static struct ifnet *
ipoib_add_port(const char *format, struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (priv == NULL)
		goto alloc_mem_failed;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		    hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->if_mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->if_mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->broadcastaddr[8] = priv->pkey >> 8;
	priv->broadcastaddr[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(IF_LLADDR(priv->dev) + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv, hca, port);
	if (result) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	if (ipoib_cm_admin_enabled(priv))
		priv->dev->if_mtu = IPOIB_CM_MTU(ipoib_cm_max_mtu(priv));

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
	    priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		    "port %d (ret = %d)\n",
		    hca->name, port, result);
		goto event_failed;
	}

	if_printf(priv->dev, "Attached to %s port %d\n", hca->name, port);

	return priv->dev;

event_failed:
	ipoib_dev_cleanup(priv);

device_init_failed:
	ipoib_detach(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}
static void
ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib", device, p);
		if (!IS_ERR(dev)) {
			priv = dev->if_softc;
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
static void
ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		if (rdma_port_get_link_layer(device, priv->port) != IB_LINK_LAYER_INFINIBAND)
			continue;

		ib_unregister_event_handler(&priv->event_handler);

		/* dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); */

		flush_workqueue(ipoib_workqueue);

		ipoib_dev_cleanup(priv);
		ipoib_detach(priv);
	}

	kfree(dev_list);
}
static void
ipoib_config_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;
	int error;

	if (ifp->if_type != IFT_INFINIBAND)
		return;
	dev = VLAN_DEVAT(ifp, vtag);
	if (dev == NULL)
		return;
	parent = ifp->if_softc;
	/* We only support 15 bits of pkey. */
	pkey = vtag | 0x8000;	/* Set full membership bit. */
	if (pkey == parent->pkey)
		return;
	/* Check for dups */
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			goto out;
		}
	}
	priv = ipoib_priv_alloc();
	priv->dev = dev;
	priv->max_ib_mtu = parent->max_ib_mtu;
	priv->mcast_mtu = priv->admin_mtu = parent->dev->if_mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
	error = ipoib_set_dev_features(priv, parent->ca);
	if (error)
		goto out;
	priv->pkey = pkey;
	priv->broadcastaddr[8] = pkey >> 8;
	priv->broadcastaddr[9] = pkey & 0xff;
	dev->if_broadcastaddr = priv->broadcastaddr;
	error = ipoib_dev_init(priv, parent->ca, parent->port);
	if (error)
		goto out;
	priv->parent = parent->dev;
	list_add_tail(&priv->list, &parent->child_intfs);
	VLAN_SETCOOKIE(dev, priv);
	dev->if_start = ipoib_vlan_start;
	dev->if_drv_flags &= ~IFF_DRV_RUNNING;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ipoib_open(priv);
	mutex_unlock(&parent->vlan_mutex);
	return;
out:
	mutex_unlock(&parent->vlan_mutex);
1154 "failed to initialize subinterface: device %s, port %d vtag 0x%X",
1155 parent->ca->name, parent->port, vtag);
static void
ipoib_unconfig_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;

	if (ifp->if_type != IFT_INFINIBAND)
		return;

	dev = VLAN_DEVAT(ifp, vtag);
	if (dev)
		VLAN_SETCOOKIE(dev, NULL);
	pkey = vtag | 0x8000;
	parent = ifp->if_softc;
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			ipoib_dev_cleanup(priv);
			list_del(&priv->list);
			break;
		}
	}
	mutex_unlock(&parent->vlan_mutex);
}
eventhandler_tag ipoib_vlan_attach;
eventhandler_tag ipoib_vlan_detach;
static int __init
ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
	    IPOIB_MIN_QUEUE_SIZE));
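
	/*
	 * Worked example: loading the module with recv_queue_size=100
	 * yields roundup_pow_of_two(100) = 128 receive descriptors,
	 * which is then clamped into the
	 * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE] range; the send
	 * queue is additionally kept at or above 2 * MAX_SEND_CQE.
	 */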
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	ipoib_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ipoib_config_vlan, NULL, EVENTHANDLER_PRI_FIRST);
	ipoib_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ipoib_unconfig_vlan, NULL, EVENTHANDLER_PRI_FIRST);

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

	return ret;
}
static void __exit
ipoib_cleanup_module(void)
{

	EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach);
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);
}
/*
 * Infiniband output routine.
 */
static int
ipoib_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{
	u_char edst[INFINIBAND_ALEN];
	struct llentry *lle = NULL;
	struct rtentry *rt0 = NULL;
	struct ipoib_header *eh;

		if (!(m->m_flags & (M_BCAST | M_MCAST)))

	error = mac_ifnet_check_transmit(ifp, m);

	if (ifp->if_flags & IFF_MONITOR) {

	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))) {

	switch (dst->sa_family) {
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, &lle->ll_addr.mac8, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ip_ib_mc_map(((struct sockaddr_in *)dst)->sin_addr.s_addr, ifp->if_broadcastaddr, edst);
		else
			error = arpresolve(ifp, rt0, m, dst, edst, &lle);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IP);

		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_INFINIBAND);

		switch(ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			type = htons(ETHERTYPE_REVARP);
			break;

			type = htons(ETHERTYPE_ARP);

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, edst, INFINIBAND_ALEN);
		else
			bcopy(ar_tha(ah), edst, INFINIBAND_ALEN);

		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, &lle->ll_addr.mac8, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ipv6_ib_mc_map(&((struct sockaddr_in6 *)dst)->sin6_addr, ifp->if_broadcastaddr, edst);
		else
			error = nd6_storelladdr(ifp, m, dst, (u_char *)edst, &lle);

		type = htons(ETHERTYPE_IPV6);

		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		error = EAFNOSUPPORT;

	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 */
	M_PREPEND(m, IPOIB_HEADER_LEN, M_NOWAIT);

	eh = mtod(m, struct ipoib_header *);
	(void)memcpy(&eh->proto, &type, sizeof(eh->proto));
	(void)memcpy(&eh->hwaddr, edst, sizeof (edst));

	/*
	 * Queue message on interface, update output statistics if
	 * successful, and start output if interface not yet active.
	 */
	return ((ifp->if_transmit)(ifp, m));
/*
 * Upper layer processing for a received Infiniband packet.
 */
static void
ipoib_demux(struct ifnet *ifp, struct mbuf *m, u_short proto)
{
	int isr;

	/*
	 * Tag the mbuf with an appropriate MAC label before any other
	 * consumers can get to it.
	 */
	mac_ifnet_create_mbuf(ifp, m);

	/* Allow monitor mode to claim this frame, after stats are updated. */
	if (ifp->if_flags & IFF_MONITOR) {
		if_printf(ifp, "discard frame at IFF_MONITOR\n");

	/*
	 * Dispatch frame to upper layer.
	 */

		if (ifp->if_flags & IFF_NOARP) {
			/* Discard packet if ARP is disabled on interface */

	case ETHERTYPE_IPV6:

	netisr_dispatch(isr, m);
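
/*
 * Minimal sketch of the dispatch decision above, assuming the
 * conventional EtherType-to-netisr mapping (only the IPv6 case and the
 * IFF_NOARP check survive in the body as written): frames the
 * interface cannot handle are dropped.
 */
#if 0
	switch (proto) {
	case ETHERTYPE_IP:
		isr = NETISR_IP;
		break;
	case ETHERTYPE_ARP:
		if (ifp->if_flags & IFF_NOARP)
			goto discard;	/* ARP disabled on this interface */
		isr = NETISR_ARP;
		break;
	case ETHERTYPE_IPV6:
		isr = NETISR_IPV6;
		break;
	default:
		goto discard;
	}
	netisr_dispatch(isr, m);
#endif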
/*
 * Process a received Infiniband packet.
 */
static void
ipoib_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ipoib_header *eh;

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}
	CURVNET_SET_QUIET(ifp->if_vnet);

	/* Let BPF have it before we strip the header. */
	IPOIB_MTAP(ifp, m);

	eh = mtod(m, struct ipoib_header *);
	/*
	 * Reset layer specific mbuf flags to avoid confusing upper layers.
	 * Strip off Infiniband header.
	 */
	m->m_flags &= ~M_VLANTAG;
	m_adj(m, IPOIB_HEADER_LEN);

	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		if (memcmp(eh->hwaddr, ifp->if_broadcastaddr,
		    ifp->if_addrlen) == 0)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
	}

	ipoib_demux(ifp, m, ntohs(eh->proto));
	CURVNET_RESTORE();
}
static int
ipoib_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
	struct sockaddr *sa)
{
	struct sockaddr_dl *sdl;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	u_char *e_addr;

	switch(sa->sa_family) {
	case AF_LINK:
		/*
		 * No mapping needed. Just check that it's a valid MC address.
		 */
		sdl = (struct sockaddr_dl *)sa;
		e_addr = LLADDR(sdl);
		if (!IPOIB_IS_MULTICAST(e_addr))
			return EADDRNOTAVAIL;
		*llsa = NULL;
		return 0;

	case AF_INET:
		sin = (struct sockaddr_in *)sa;
		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
			return EADDRNOTAVAIL;
		sdl = malloc(sizeof *sdl, M_IFMADDR,
		    M_NOWAIT|M_ZERO);
		if (sdl == NULL)
			return ENOMEM;
		sdl->sdl_len = sizeof *sdl;
		sdl->sdl_family = AF_LINK;
		sdl->sdl_index = ifp->if_index;
		sdl->sdl_type = IFT_INFINIBAND;
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ip_ib_mc_map(sin->sin_addr.s_addr, ifp->if_broadcastaddr,
		    e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;

	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)sa;
		/*
		 * An IP6 address of 0 means listen to all
		 * of the multicast address used for IP6.
		 * This has no meaning in ipoib.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		sdl = malloc(sizeof *sdl, M_IFMADDR,
		    M_NOWAIT|M_ZERO);
		if (sdl == NULL)
			return ENOMEM;
		sdl->sdl_len = sizeof *sdl;
		sdl->sdl_family = AF_LINK;
		sdl->sdl_index = ifp->if_index;
		sdl->sdl_type = IFT_INFINIBAND;
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ipv6_ib_mc_map(&sin6->sin6_addr, ifp->if_broadcastaddr, e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;

	default:
		return EAFNOSUPPORT;
	}
}
module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);