/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipoib.h"

static int ipoib_resolvemulti(struct ifnet *, struct sockaddr **,
		struct sockaddr *);
#include <linux/module.h>

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */
#include <linux/if_vlan.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
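/*
 * Transmit and receive ring sizes.  Both are tunable at module load
 * time; ipoib_init_module() below clamps each to a power of two within
 * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE].
 */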
int ipoib_sendq_size = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level = 1;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
struct ipoib_path_iter {
	struct ipoib_dev_priv *priv;
	struct ipoib_path  path;
};
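/*
 * Link-layer broadcast address: the IPv4 all-hosts multicast group
 * mapped onto InfiniBand.  Bytes 0-3 hold the (broadcast) QPN, bytes
 * 4-19 the multicast GID.  Bytes 8-9 of the address (GID bytes 4-5)
 * are overwritten with the interface P_Key once it is known; see
 * ipoib_add_port() below.
 */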
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
static void ipoib_start(struct ifnet *dev);
static int ipoib_output(struct ifnet *ifp, struct mbuf *m,
	    const struct sockaddr *dst, struct route *ro);
static int ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void ipoib_input(struct ifnet *ifp, struct mbuf *m);
#define	IPOIB_MTAP(_ifp, _m)					\
do {								\
	if (bpf_peers_present((_ifp)->if_bpf)) {		\
		M_ASSERTVALID(_m);				\
		ipoib_mtap_mb((_ifp), (_m));			\
	}							\
} while (0)
/*
 * This is for clients that have an ipoib_header in the mbuf.
 */
static void
ipoib_mtap_mb(struct ifnet *ifp, struct mbuf *mb)
{
	struct ipoib_header *ih;
	struct ether_header eh;

	ih = mtod(mb, struct ipoib_header *);
	eh.ether_type = ih->proto;
	bcopy(ih->hwaddr, &eh.ether_dhost, ETHER_ADDR_LEN);
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	mb->m_data += sizeof(struct ipoib_header);
	mb->m_len -= sizeof(struct ipoib_header);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
	mb->m_data -= sizeof(struct ipoib_header);
	mb->m_len += sizeof(struct ipoib_header);
}
void
ipoib_mtap_proto(struct ifnet *ifp, struct mbuf *mb, uint16_t proto)
{
	struct ether_header eh;

	eh.ether_type = proto;
	bzero(&eh.ether_shost, ETHER_ADDR_LEN);
	bzero(&eh.ether_dhost, ETHER_ADDR_LEN);
	bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb);
}
static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};
int
ipoib_open(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(priv))
		return 0;

	if (ipoib_ib_dev_open(priv))
		goto err_disable;

	if (ipoib_ib_dev_up(priv))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ipoib_open(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}
	dev->if_drv_flags |= IFF_DRV_RUNNING;
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return 0;

err_stop:
	ipoib_ib_dev_stop(priv, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}
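/*
 * if_init handler: bring the interface up if it is not already running
 * and schedule a light flush to resynchronize device state.
 */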
static void
ipoib_init(void *arg)
{
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;

	priv = arg;
	dev = priv->dev;
	if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
		ipoib_open(priv);
	queue_work(ipoib_workqueue, &priv->flush_light);
}
static int
ipoib_stop(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ipoib_ib_dev_down(priv, 0);
	ipoib_ib_dev_stop(priv, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list)
			if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ipoib_stop(cpriv);
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}
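/*
 * Validate and set the interface MTU.  In connected mode the limit is
 * the CM MTU; in datagram mode the effective MTU is additionally capped
 * by the multicast group MTU learned when the broadcast group is joined.
 */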
static int
ipoib_change_mtu(struct ipoib_dev_priv *priv, int new_mtu)
{
	struct ifnet *dev = priv->dev;

	/* dev->if_mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(priv)) {
		if (new_mtu > IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
			    priv->mcast_mtu);

		dev->if_mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->if_mtu = min(priv->mcast_mtu, priv->admin_mtu);

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}
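/*
 * Driver ioctl handler: interface flags, multicast list changes,
 * address assignment and MTU changes.
 */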
static int
ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ipoib_dev_priv *priv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	/* check if detaching */
	if (priv == NULL || priv->gone != 0)
		return (ENXIO);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				error = -ipoib_open(priv);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ipoib_stop(priv);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			queue_work(ipoib_workqueue, &priv->restart_task);
		break;
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ifp->if_init(ifp->if_softc);
			break;
		}
		break;
	case SIOCGIFADDR:
		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
		    INFINIBAND_ALEN);
		break;
	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		error = -ipoib_change_mtu(priv, ifr->ifr_mtu);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
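/*
 * Resolved paths are cached in a red-black tree keyed by destination
 * GID (with a companion list used for iteration and flushing).  Callers
 * are expected to hold priv->lock across lookups and insertions.
 */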
static struct ipoib_path *
__path_find(struct ipoib_dev_priv *priv, void *gid)
{
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
		    sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}
static int
__path_add(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
		    sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}
void
ipoib_path_free(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{

	_IF_DRAIN(&path->queue);

	if (path->ah)
		ipoib_put_ah(path->ah);
	if (ipoib_cm_get(path))
		ipoib_cm_destroy_tx(ipoib_cm_get(path));

	kfree(path);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *
ipoib_path_iter_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->priv = priv;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int
ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = iter->priv;
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
		    sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void
ipoib_path_iter_read(struct ipoib_path_iter *iter, struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
void
ipoib_mark_paths_invalid(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %16D invalid\n",
		    be16_to_cpu(path->pathrec.dlid),
		    path->pathrec.dgid.raw, ":");
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}
void
ipoib_flush_paths(struct ipoib_dev_priv *priv)
{
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		wait_for_completion(&path->done);
		ipoib_path_free(priv, path);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
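/*
 * SA path record query callback.  On success, build an address handle
 * from the returned record, mark the path valid, and retransmit any
 * mbufs that were queued on the path while the lookup was in flight.
 */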
static void
path_rec_completion(int status, struct ib_sa_path_rec *pathrec, void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct ipoib_dev_priv *priv = path->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ifqueue mbqueue;
	struct mbuf *mb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %16D\n",
		    be16_to_cpu(pathrec->dlid), pathrec->dgid.raw, ":");
	else
		ipoib_dbg(priv, "PathRec status %d for GID %16D\n",
		    status, path->pathrec.dgid.raw, ":");

	bzero(&mbqueue, sizeof(mbqueue));

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(priv, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
		    ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		for (;;) {
			_IF_DEQUEUE(&path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}

#ifdef CONFIG_INFINIBAND_IPOIB_CM
		if (ipoib_cm_enabled(priv, path->hwaddr) && !ipoib_cm_get(path))
			ipoib_cm_set(path, ipoib_cm_create_tx(priv, path));
#endif

		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	for (;;) {
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "dev_queue_xmit failed "
			    "to requeue packet\n");
	}
}
static struct ipoib_path *
path_rec_create(struct ipoib_dev_priv *priv, uint8_t *hwaddr)
{
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->priv = priv;

	bzero(&path->queue, sizeof(path->queue));

#ifdef CONFIG_INFINIBAND_IPOIB_CM
	memcpy(&path->hwaddr, hwaddr, INFINIBAND_ALEN);
#endif
	memcpy(path->pathrec.dgid.raw, &hwaddr[4], sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}
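/*
 * Issue an SA path record query for this path.  The MTU selector
 * (IB_SA_GT) requests a path whose MTU can carry the current interface
 * MTU plus the IPoIB encapsulation header; out-of-range MTUs fall back
 * to a wildcard query.
 */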
static int
path_rec_start(struct ipoib_dev_priv *priv, struct ipoib_path *path)
{
	struct ifnet *dev = priv->dev;

	ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU;
	struct ib_sa_path_rec p_rec;

	p_rec = path->pathrec;
	p_rec.mtu_selector = IB_SA_GT;

	switch (roundup_pow_of_two(dev->if_mtu + IPOIB_ENCAP_LEN)) {
	case 512:
		p_rec.mtu = IB_MTU_256;
		break;
	case 1024:
		p_rec.mtu = IB_MTU_512;
		break;
	case 2048:
		p_rec.mtu = IB_MTU_1024;
		break;
	case 4096:
		p_rec.mtu = IB_MTU_2048;
		break;
	default:
		/* Wildcard everything */
		comp_mask = 0;
		p_rec.mtu = 0;
		p_rec.mtu_selector = 0;
	}

	ipoib_dbg(priv, "Start path record lookup for %16D MTU > %d\n",
	    p_rec.dgid.raw, ":",
	    comp_mask ? ib_mtu_enum_to_int(p_rec.mtu) : 0);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &p_rec, comp_mask		|
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}
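/*
 * Transmit one unicast mbuf.  The 20-byte destination hardware address
 * encodes the remote QPN in bytes 0-3 and the destination GID in bytes
 * 4-19; the GID keys the path lookup.  Mbufs for paths still being
 * resolved are queued on the path, bounded by IPOIB_MAX_PATH_REC_QUEUE.
 */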
static void
ipoib_unicast_send(struct mbuf *mb, struct ipoib_dev_priv *priv, struct ipoib_header *eh)
{
	struct ipoib_path *path;

	path = __path_find(priv, eh->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(priv, eh->hwaddr);
			new_path = 1;
		}
		if (path) {
			_IF_ENQUEUE(&path->queue, mb);
			if (!path->query && path_rec_start(priv, path)) {
				if (new_path)
					ipoib_path_free(priv, path);
				return;
			} else
				__path_add(priv, path);
		} else {
			++priv->dev->if_oerrors;
			m_freem(mb);
		}

		return;
	}

	if (ipoib_cm_get(path) && ipoib_cm_up(path)) {
		ipoib_cm_send(priv, mb, ipoib_cm_get(path));
	} else if (path->ah) {
		ipoib_send(priv, mb, path->ah, IPOIB_QPN(eh->hwaddr));
	} else if ((path->query || !path_rec_start(priv, path)) &&
		    path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) {
		_IF_ENQUEUE(&path->queue, mb);
	} else {
		++priv->dev->if_oerrors;
		m_freem(mb);
	}
}
static int
ipoib_send_one(struct ipoib_dev_priv *priv, struct mbuf *mb)
{
	struct ipoib_header *eh;

	eh = mtod(mb, struct ipoib_header *);
	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		/* Add in the P_Key for multicast */
		eh->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		eh->hwaddr[9] = priv->pkey & 0xff;

		ipoib_mcast_send(priv, eh->hwaddr + 4, mb);
	} else
		ipoib_unicast_send(mb, priv, eh);

	return 0;
}
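/*
 * Drain the interface send queue and hand each mbuf to ipoib_send_one()
 * under priv->lock.  Shared by the parent interface and its VLAN
 * (P_Key) children.
 */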
static void
_ipoib_start(struct ifnet *dev, struct ipoib_dev_priv *priv)
{
	struct mbuf *mb;

	if ((dev->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	spin_lock(&priv->lock);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb == NULL)
			break;
		IPOIB_MTAP(dev, mb);
		ipoib_send_one(priv, mb);
	}
	spin_unlock(&priv->lock);
}
static void
ipoib_start(struct ifnet *dev)
{
	_ipoib_start(dev, dev->if_softc);
}
static void
ipoib_vlan_start(struct ifnet *dev)
{
	struct ipoib_dev_priv *priv;
	struct mbuf *mb;

	priv = VLAN_COOKIE(dev);
	if (priv != NULL)
		return _ipoib_start(dev, priv);
	while (!IFQ_DRV_IS_EMPTY(&dev->if_snd)) {
		IFQ_DRV_DEQUEUE(&dev->if_snd, mb);
		if (mb != NULL)
			m_freem(mb);
		dev->if_oerrors++;
	}
}
int
ipoib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port)
{

	/* Allocate RX/TX "rings" to hold queued mbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
	    GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		    ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		    ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(priv, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}
static void
ipoib_detach(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev;

	dev = priv->dev;
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		priv->gone = 1;
		bpfdetach(dev);
		if_detach(dev);
		if_free(dev);
	} else
		VLAN_SETCOOKIE(priv->dev, NULL);

	free(priv, M_TEMP);
}
void
ipoib_dev_cleanup(struct ipoib_dev_priv *priv)
{
	struct ipoib_dev_priv *cpriv, *tcpriv;

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		ipoib_dev_cleanup(cpriv);
		ipoib_detach(cpriv);
	}

	ipoib_ib_dev_cleanup(priv);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}
static volatile int ipoib_unit;

static struct ipoib_dev_priv *
ipoib_priv_alloc(void)
{
	struct ipoib_dev_priv *priv;

	priv = malloc(sizeof(struct ipoib_dev_priv), M_TEMP, M_ZERO|M_WAITOK);
	spin_lock_init(&priv->lock);
	mutex_init(&priv->vlan_mutex);
	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);
	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	memcpy(priv->broadcastaddr, ipv4_bcast_addr, INFINIBAND_ALEN);

	return (priv);
}
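/*
 * Allocate and attach an ifnet for a new IPoIB interface.  BPF is
 * attached as DLT_EN10MB: ipoib_mtap_mb() above presents captured
 * frames with a synthesized Ethernet header so existing capture tools
 * can parse them.
 */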
struct ipoib_dev_priv *
ipoib_intf_alloc(const char *name)
{
	struct ipoib_dev_priv *priv;
	struct sockaddr_dl *sdl;
	struct ifnet *dev;

	priv = ipoib_priv_alloc();
	dev = priv->dev = if_alloc(IFT_INFINIBAND);
	if (dev == NULL) {
		free(priv, M_TEMP);
		return NULL;
	}
	dev->if_softc = priv;
	if_initname(dev, name, atomic_fetchadd_int(&ipoib_unit, 1));
	dev->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	dev->if_addrlen = INFINIBAND_ALEN;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if_attach(dev);
	dev->if_init = ipoib_init;
	dev->if_ioctl = ipoib_ioctl;
	dev->if_start = ipoib_start;
	dev->if_output = ipoib_output;
	dev->if_input = ipoib_input;
	dev->if_resolvemulti = ipoib_resolvemulti;
	if_initbaudrate(dev, IF_Gbps(10));
	dev->if_broadcastaddr = priv->broadcastaddr;
	dev->if_snd.ifq_maxlen = ipoib_sendq_size * 2;
	sdl = (struct sockaddr_dl *)dev->if_addr->ifa_addr;
	sdl->sdl_type = IFT_INFINIBAND;
	sdl->sdl_alen = dev->if_addrlen;

	if_link_state_change(dev, LINK_STATE_DOWN);
	bpfattach(dev, DLT_EN10MB, ETHER_HDR_LEN);

	return dev->if_softc;
}
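/*
 * Query HCA capabilities and translate them into ifnet capabilities:
 * checksum offload when the device supports UD IP checksumming (the
 * TSO hookup is stubbed out), plus VLAN tagging and link-state support.
 */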
int
ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		    hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		    hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	priv->dev->if_hwassist = 0;
	priv->dev->if_capabilities = 0;

#ifndef CONFIG_INFINIBAND_IPOIB_CM
	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		set_bit(IPOIB_FLAG_CSUM, &priv->flags);
		priv->dev->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
		priv->dev->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
	}

#if 0
	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) {
		priv->dev->if_capabilities |= IFCAP_TSO4;
		priv->dev->if_hwassist |= CSUM_TSO;
	}
#endif
#endif
	priv->dev->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_LINKSTATE;
	priv->dev->if_capenable = priv->dev->if_capabilities;

	return 0;
}
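/*
 * Create and initialize one IPoIB interface for a given HCA port:
 * query port MTU, P_Key and GID, set the full-membership bit, and
 * register for asynchronous port events.
 */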
static struct ifnet *
ipoib_add_port(const char *format, struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		    hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->if_mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->if_mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->broadcastaddr[8] = priv->pkey >> 8;
	priv->broadcastaddr[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	memcpy(IF_LLADDR(priv->dev) + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		    hca->name, port, result);
		goto device_init_failed;
	}
	if (ipoib_cm_admin_enabled(priv))
		priv->dev->if_mtu = IPOIB_CM_MTU(ipoib_cm_max_mtu(priv));

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
	    priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		    "port %d (ret = %d)\n",
		    hca->name, port, result);
		goto event_failed;
	}
	if_printf(priv->dev, "Attached to %s port %d\n", hca->name, port);

	return priv->dev;

event_failed:
	ipoib_dev_cleanup(priv);

device_init_failed:
	ipoib_detach(priv);

alloc_mem_failed:
	return ERR_PTR(result);
}
static void
ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct ifnet *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib", device, p);
		if (!IS_ERR(dev)) {
			priv = dev->if_softc;
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
static void
ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		if (rdma_port_get_link_layer(device, priv->port) != IB_LINK_LAYER_INFINIBAND)
			continue;

		ipoib_stop(priv);

		ib_unregister_event_handler(&priv->event_handler);

		/* dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); */

		flush_workqueue(ipoib_workqueue);

		ipoib_dev_cleanup(priv);
		ipoib_detach(priv);
	}

	kfree(dev_list);
}
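/*
 * vlan_config event handler: creates a P_Key child (sub)interface.
 * The vtag supplies the low 15 bits of the P_Key and the full
 * membership bit (0x8000) is ORed in, so only 15-bit P_Keys are
 * supported.
 */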
static void
ipoib_config_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;
	int error;

	if (ifp->if_type != IFT_INFINIBAND)
		return;
	dev = VLAN_DEVAT(ifp, vtag);
	if (dev == NULL)
		return;
	priv = NULL;
	error = 0;
	parent = ifp->if_softc;
	/* We only support 15 bits of pkey. */
	if (vtag & 0x8000)
		return;
	pkey = vtag | 0x8000;	/* Set full membership bit. */
	if (pkey == parent->pkey)
		return;
	/* Check for dups */
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			priv = NULL;
			error = EBUSY;
			goto out;
		}
	}
	priv = ipoib_priv_alloc();
	priv->dev = dev;
	priv->max_ib_mtu = parent->max_ib_mtu;
	priv->mcast_mtu = priv->admin_mtu = parent->dev->if_mtu;
	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
	error = ipoib_set_dev_features(priv, parent->ca);
	if (error)
		goto out;
	priv->pkey = pkey;
	priv->broadcastaddr[8] = pkey >> 8;
	priv->broadcastaddr[9] = pkey & 0xff;
	dev->if_broadcastaddr = priv->broadcastaddr;
	error = ipoib_dev_init(priv, parent->ca, parent->port);
	if (error)
		goto out;
	priv->parent = parent->dev;
	list_add_tail(&priv->list, &parent->child_intfs);
	VLAN_SETCOOKIE(dev, priv);
	dev->if_start = ipoib_vlan_start;
	dev->if_drv_flags &= ~IFF_DRV_RUNNING;
	dev->if_hdrlen = IPOIB_HEADER_LEN;
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		ipoib_open(priv);
	mutex_unlock(&parent->vlan_mutex);
	return;
out:
	mutex_unlock(&parent->vlan_mutex);
	if (priv)
		free(priv, M_TEMP);
	if (error)
		if_printf(ifp,
		    "failed to initialize subinterface: device %s, port %d vtag 0x%X",
		    parent->ca->name, parent->port, vtag);
}
static void
ipoib_unconfig_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct ipoib_dev_priv *parent;
	struct ipoib_dev_priv *priv;
	struct ifnet *dev;
	uint16_t pkey;

	if (ifp->if_type != IFT_INFINIBAND)
		return;

	dev = VLAN_DEVAT(ifp, vtag);
	if (dev)
		VLAN_SETCOOKIE(dev, NULL);
	pkey = vtag | 0x8000;
	parent = ifp->if_softc;
	mutex_lock(&parent->vlan_mutex);
	list_for_each_entry(priv, &parent->child_intfs, list) {
		if (priv->pkey == pkey) {
			ipoib_dev_cleanup(priv);
			list_del(&priv->list);
			break;
		}
	}
	mutex_unlock(&parent->vlan_mutex);
}
eventhandler_tag ipoib_vlan_attach;
eventhandler_tag ipoib_vlan_detach;
static int __init
ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
	    IPOIB_MIN_QUEUE_SIZE));
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	ipoib_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ipoib_config_vlan, NULL, EVENTHANDLER_PRI_FIRST);
	ipoib_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ipoib_unconfig_vlan, NULL, EVENTHANDLER_PRI_FIRST);

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	return ret;
}
static void __exit
ipoib_cleanup_module(void)
{

	EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach);
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);
}
/*
 * Infiniband output routine.
 */
static int
ipoib_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{
	u_char edst[INFINIBAND_ALEN];
	struct llentry *lle = NULL;
	struct rtentry *rt0 = NULL;
	struct ipoib_header *eh;
	int error = 0;
	short type;

	if (ro != NULL) {
		if (!(m->m_flags & (M_BCAST | M_MCAST)))
			lle = ro->ro_lle;
		rt0 = ro->ro_rt;
	}
#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error)
		goto bad;
#endif

	if (ifp->if_flags & IFF_MONITOR) {
		error = ENETDOWN;
		goto bad;
	}
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))) {
		error = ENETDOWN;
		goto bad;
	}

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, &lle->ll_addr.mac8, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ip_ib_mc_map(((struct sockaddr_in *)dst)->sin_addr.s_addr, ifp->if_broadcastaddr, edst);
		else
			error = arpresolve(ifp, rt0, m, dst, edst, &lle);
		if (error)
			return (error == EWOULDBLOCK ? 0 : error);
		type = htons(ETHERTYPE_IP);
		break;
	case AF_ARP:
	{
		struct arphdr *ah;

		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_INFINIBAND);

		switch(ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			type = htons(ETHERTYPE_REVARP);
			break;
		default:
			type = htons(ETHERTYPE_ARP);
			break;
		}

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, edst, INFINIBAND_ALEN);
		else
			bcopy(ar_tha(ah), edst, INFINIBAND_ALEN);

	}
	break;
#endif
#ifdef INET6
	case AF_INET6:
		if (lle != NULL && (lle->la_flags & LLE_VALID))
			memcpy(edst, &lle->ll_addr.mac8, sizeof(edst));
		else if (m->m_flags & M_MCAST)
			ipv6_ib_mc_map(&((struct sockaddr_in6 *)dst)->sin6_addr, ifp->if_broadcastaddr, edst);
		else
			error = nd6_storelladdr(ifp, m, dst, (u_char *)edst, &lle);
		if (error)
			return error;
		type = htons(ETHERTYPE_IPV6);
		break;
#endif

	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 */
	M_PREPEND(m, IPOIB_HEADER_LEN, M_NOWAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto bad;
	}
	eh = mtod(m, struct ipoib_header *);
	(void)memcpy(&eh->proto, &type, sizeof(eh->proto));
	(void)memcpy(&eh->hwaddr, edst, sizeof (edst));

	/*
	 * Queue message on interface, update output statistics if
	 * successful, and start output if interface not yet active.
	 */
	return ((ifp->if_transmit)(ifp, m));
bad:
	if (m != NULL)
		m_freem(m);
	return (error);
}
/*
 * Upper layer processing for a received Infiniband packet.
 */
static void
ipoib_demux(struct ifnet *ifp, struct mbuf *m, u_short proto)
{
	int isr;

#ifdef MAC
	/*
	 * Tag the mbuf with an appropriate MAC label before any other
	 * consumers can get to it.
	 */
	mac_ifnet_create_mbuf(ifp, m);
#endif
	/* Allow monitor mode to claim this frame, after stats are updated. */
	if (ifp->if_flags & IFF_MONITOR) {
		if_printf(ifp, "discard frame at IFF_MONITOR\n");
		m_freem(m);
		return;
	}
	/*
	 * Dispatch frame to upper layer.
	 */
	switch (proto) {
#ifdef INET
	case ETHERTYPE_IP:
		isr = NETISR_IP;
		break;

	case ETHERTYPE_ARP:
		if (ifp->if_flags & IFF_NOARP) {
			/* Discard packet if ARP is disabled on interface */
			m_freem(m);
			return;
		}
		isr = NETISR_ARP;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		m_freem(m);
		return;
	}
	netisr_dispatch(isr, m);
}
/*
 * Process a received Infiniband packet.
 */
static void
ipoib_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ipoib_header *eh;

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}
	CURVNET_SET_QUIET(ifp->if_vnet);

	/* Let BPF have it before we strip the header. */
	IPOIB_MTAP(ifp, m);

	eh = mtod(m, struct ipoib_header *);
	/*
	 * Reset layer specific mbuf flags to avoid confusing upper layers.
	 * Strip off Infiniband header.
	 */
	m->m_flags &= ~M_VLANTAG;
	m_adj(m, IPOIB_HEADER_LEN);

	if (IPOIB_IS_MULTICAST(eh->hwaddr)) {
		if (memcmp(eh->hwaddr, ifp->if_broadcastaddr,
		    ifp->if_addrlen) == 0)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
		ifp->if_imcasts++;
	}

	ipoib_demux(ifp, m, ntohs(eh->proto));
	CURVNET_RESTORE();
}
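/*
 * Map a network-layer multicast address to the corresponding 20-byte
 * IPoIB link-layer address for AF_LINK, AF_INET and AF_INET6.
 */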
static int
ipoib_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
	struct sockaddr *sa)
{
	struct sockaddr_dl *sdl;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif
	u_char *e_addr;

	switch(sa->sa_family) {
	case AF_LINK:
		/*
		 * No mapping needed. Just check that it's a valid MC address.
		 */
		sdl = (struct sockaddr_dl *)sa;
		e_addr = LLADDR(sdl);
		if (!IPOIB_IS_MULTICAST(e_addr))
			return EADDRNOTAVAIL;
		*llsa = 0;
		return 0;

#ifdef INET
	case AF_INET:
		sin = (struct sockaddr_in *)sa;
		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
			return EADDRNOTAVAIL;
		sdl = malloc(sizeof *sdl, M_IFMADDR,
		    M_NOWAIT|M_ZERO);
		if (sdl == NULL)
			return ENOMEM;
		sdl->sdl_len = sizeof *sdl;
		sdl->sdl_family = AF_LINK;
		sdl->sdl_index = ifp->if_index;
		sdl->sdl_type = IFT_INFINIBAND;
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ip_ib_mc_map(sin->sin_addr.s_addr, ifp->if_broadcastaddr,
		    e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif
#ifdef INET6
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)sa;
		/*
		 * An IP6 address of 0 means listen to all
		 * of the multicast address used for IP6.
		 * This has no meaning in ipoib.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		sdl = malloc(sizeof *sdl, M_IFMADDR,
		    M_NOWAIT|M_ZERO);
		if (sdl == NULL)
			return ENOMEM;
		sdl->sdl_len = sizeof *sdl;
		sdl->sdl_family = AF_LINK;
		sdl->sdl_index = ifp->if_index;
		sdl->sdl_type = IFT_INFINIBAND;
		sdl->sdl_alen = INFINIBAND_ALEN;
		e_addr = LLADDR(sdl);
		ipv6_ib_mc_map(&sin6->sin6_addr, ifp->if_broadcastaddr, e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif

	default:
		return EAFNOSUPPORT;
	}
}
module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);

#undef MODULE_VERSION
#include <sys/module.h>
static int
ipoib_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t ipoib_mod = {
	.name = "ipoib",
	.evhand = ipoib_evhand,
};

DECLARE_MODULE(ipoib, ipoib_mod, SI_SUB_SMP, SI_ORDER_ANY);
MODULE_DEPEND(ipoib, ibcore, 1, 1, 1);