2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
37 #include <linux/delay.h>
38 #include <linux/completion.h>
40 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
/*
 * Debug tunable: multicast tracing is emitted when > 0 (consumed by the
 * ipoib_dbg_mcast() calls throughout this file).  Mode 0644 makes it
 * writable at runtime via the module parameter interface.
 */
41 static int mcast_debug_level = 1;
43 module_param(mcast_debug_level, int, 0644);
44 MODULE_PARM_DESC(mcast_debug_level,
45 "Enable multicast debug tracing if > 0");
/*
 * Serializes scheduling/cancelling of the delayed multicast join task
 * (see the queue_delayed_work()/cancel_delayed_work() pairs below).
 */
48 static DEFINE_MUTEX(mcast_mutex);
/*
 * Cursor for the debug-only multicast iterator (ipoib_mcast_iter_*
 * below): a snapshot of one group's state, keyed by its MGID.
 * NOTE(review): the mgid member and closing brace are elided in this
 * view of the file.
 */
50 struct ipoib_mcast_iter {
51 struct ipoib_dev_priv *priv;
53 unsigned long created;	/* jiffies when the group entry was allocated */
54 unsigned int queuelen;	/* packets queued awaiting join completion */
55 unsigned int complete;	/* non-zero once an address handle exists */
56 unsigned int send_only;	/* send-only (QP not attached) membership */
/*
 * Tear down one multicast group entry: drop its address handle, drain
 * any packets still queued behind an unfinished join, and charge those
 * drops as interface output errors.
 * NOTE(review): interior lines (braces, the NULL check before
 * ipoib_put_ah(), and the final free) are elided in this view.
 */
59 static void ipoib_mcast_free(struct ipoib_mcast *mcast)
61 struct ifnet *dev = mcast->priv->dev;
64 ipoib_dbg_mcast(mcast->priv, "deleting multicast group %16D\n",
65 mcast->mcmember.mgid.raw, ":");
68 ipoib_put_ah(mcast->ah);
/* Whatever is still queued will never be sent; count it before draining. */
70 tx_dropped = mcast->pkt_queue.ifq_len;
71 _IF_DRAIN(&mcast->pkt_queue); /* XXX Locking. */
73 if_inc_counter(dev, IFCOUNTER_OERRORS, tx_dropped);
/*
 * Allocate and zero-initialize a new multicast group entry.  can_sleep
 * selects GFP_KERNEL vs GFP_ATOMIC so this can be called from atomic
 * context (e.g. the transmit path in ipoib_mcast_send()).
 */
78 static struct ipoib_mcast *ipoib_mcast_alloc(struct ipoib_dev_priv *priv,
81 struct ipoib_mcast *mcast;
83 mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
/* Stamp creation time for the debug iterator's "created" field. */
88 mcast->created = jiffies;
91 INIT_LIST_HEAD(&mcast->list);
92 bzero(&mcast->pkt_queue, sizeof(mcast->pkt_queue));
/*
 * Look up a group by MGID in the per-device red-black tree, walking
 * left/right on the memcmp() of the full 16-byte GID.
 * NOTE(review): presumably must be called with priv->lock held, since
 * the tree is mutated under that lock elsewhere — confirm at callers.
 */
97 static struct ipoib_mcast *__ipoib_mcast_find(struct ipoib_dev_priv *priv,
100 struct rb_node *n = priv->multicast_tree.rb_node;
103 struct ipoib_mcast *mcast;
106 mcast = rb_entry(n, struct ipoib_mcast, rb_node);
108 ret = memcmp(mgid, mcast->mcmember.mgid.raw,
109 sizeof (union ib_gid));
/*
 * Insert a group into the MGID-keyed rb-tree: standard descend-compare
 * walk to find the empty slot, then rb_link_node()/rb_insert_color().
 * NOTE(review): the duplicate-key handling branch and return paths are
 * elided in this view.
 */
121 static int __ipoib_mcast_add(struct ipoib_dev_priv *priv,
122 struct ipoib_mcast *mcast)
124 struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;
127 struct ipoib_mcast *tmcast;
131 tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);
133 ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
134 sizeof (union ib_gid));
143 rb_link_node(&mcast->rb_node, pn, n);
144 rb_insert_color(&mcast->rb_node, &priv->multicast_tree);
/*
 * Second half of a successful SA join.  Records the member record
 * returned by the SA, caches the broadcast group's Q_Key into the
 * device's UD work request, attaches the QP to the group (unless it is
 * send-only), builds a UD address handle from the member parameters,
 * and finally transmits any packets that were queued while the join
 * was in flight.
 */
149 static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
150 struct ib_sa_mcmember_rec *mcmember)
152 struct ipoib_dev_priv *priv = mcast->priv;
153 struct ifnet *dev = priv->dev;
158 mcast->mcmember = *mcmember;
160 /* Set the cached Q_Key before we attach if it's the broadcast group */
/* The MGID lives at offset 4 of the 20-byte link-level broadcast address. */
161 if (!memcmp(mcast->mcmember.mgid.raw, dev->if_broadcastaddr + 4,
162 sizeof (union ib_gid))) {
163 spin_lock_irq(&priv->lock)
164 if (!priv->broadcast) {
165 spin_unlock_irq(&priv->lock);
168 priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
169 spin_unlock_irq(&priv->lock);
170 priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
/* Send-only groups skip QP attach: we transmit to them but never receive. */
174 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
175 if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
176 ipoib_warn(priv, "multicast group %16D already attached\n",
177 mcast->mcmember.mgid.raw, ":");
182 ret = ipoib_mcast_attach(priv, be16_to_cpu(mcast->mcmember.mlid),
183 &mcast->mcmember.mgid, set_qkey);
185 ipoib_warn(priv, "couldn't attach QP to multicast group %16D\n",
186 mcast->mcmember.mgid.raw, ":");
188 clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
/* Build the UD address vector; multicast always requires a GRH. */
194 struct ib_ah_attr av = {
195 .dlid = be16_to_cpu(mcast->mcmember.mlid),
196 .port_num = priv->port,
197 .sl = mcast->mcmember.sl,
198 .ah_flags = IB_AH_GRH,
199 .static_rate = mcast->mcmember.rate,
201 .flow_label = be32_to_cpu(mcast->mcmember.flow_label),
202 .hop_limit = mcast->mcmember.hop_limit,
204 .traffic_class = mcast->mcmember.traffic_class
207 av.grh.dgid = mcast->mcmember.mgid;
209 ah = ipoib_create_ah(priv, priv->pd, &av);
211 ipoib_warn(priv, "ib_address_create failed\n");
213 spin_lock_irq(&priv->lock);
215 spin_unlock_irq(&priv->lock);
217 ipoib_dbg_mcast(priv, "MGID %16D AV %p, LID 0x%04x, SL %d\n",
218 mcast->mcmember.mgid.raw, ":",
220 be16_to_cpu(mcast->mcmember.mlid),
225 /* actually send any queued packets */
226 while (mcast->pkt_queue.ifq_len) {
228 _IF_DEQUEUE(&mcast->pkt_queue, mb);
229 mb->m_pkthdr.rcvif = dev;
231 if (dev->if_transmit(dev, mb))
232 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
/*
 * SA callback for a send-only join.  On success, finish the join
 * (build the AH and flush queued packets); on failure, drop whatever
 * is queued, count the drops as output errors, and clear the BUSY bit
 * so a later transmit can retry the join.
 */
239 ipoib_mcast_sendonly_join_complete(int status,
240 struct ib_sa_multicast *multicast)
242 struct ipoib_mcast *mcast = multicast->context;
243 struct ipoib_dev_priv *priv = mcast->priv;
245 /* We trap for port events ourselves. */
246 if (status == -ENETRESET)
250 status = ipoib_mcast_join_finish(mcast, &multicast->rec);
/* Rate-limit the failure message to the first 20 occurrences. */
253 if (mcast->logcount++ < 20)
254 ipoib_dbg_mcast(priv, "multicast join failed for %16D, status %d\n",
255 mcast->mcmember.mgid.raw, ":", status);
257 /* Flush out any queued packets */
258 if_inc_counter(priv->dev, IFCOUNTER_OERRORS, mcast->pkt_queue.ifq_len);
259 _IF_DRAIN(&mcast->pkt_queue);
261 /* Clear the busy flag so we try again */
262 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
/*
 * Start an asynchronous send-only join for a group we only transmit
 * to.  Guards: bail if the port is not operationally up, and use the
 * BUSY flag to ensure only one join is outstanding per group.
 * Completion is handled in ipoib_mcast_sendonly_join_complete().
 */
268 static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
270 struct ipoib_dev_priv *priv = mcast->priv;
271 struct ib_sa_mcmember_rec rec = {
272 #if 0 /* Some SMs don't support send-only yet */
280 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
281 ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
285 if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
286 ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
/* Minimal member record: MGID, our port GID, and the partition key. */
290 rec.mgid = mcast->mcmember.mgid;
291 rec.port_gid = priv->local_gid;
292 rec.pkey = cpu_to_be16(priv->pkey);
294 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
296 IB_SA_MCMEMBER_REC_MGID |
297 IB_SA_MCMEMBER_REC_PORT_GID |
298 IB_SA_MCMEMBER_REC_PKEY |
299 IB_SA_MCMEMBER_REC_JOIN_STATE,
301 ipoib_mcast_sendonly_join_complete,
303 if (IS_ERR(mcast->mc)) {
304 ret = PTR_ERR(mcast->mc);
/* Join never started: clear BUSY so a later transmit can retry. */
305 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
306 ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
309 ipoib_dbg_mcast(priv, "no multicast record for %16D, starting join\n",
310 mcast->mcmember.mgid.raw, ":");
/*
 * Deferred "carrier on" work, queued from ipoib_mcast_join_complete()
 * after the broadcast group joins.  Re-checks that the IB port is
 * actually ACTIVE before announcing link-up to the network stack.
 */
316 void ipoib_mcast_carrier_on_task(struct work_struct *work)
318 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
320 struct ib_port_attr attr;
323 * Take rtnl_lock to avoid racing with ipoib_stop() and
324 * turning the carrier back on while a device is being
327 if (ib_query_port(priv->ca, priv->port, &attr) ||
328 attr.state != IB_PORT_ACTIVE) {
329 ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
332 if_link_state_change(priv->dev, LINK_STATE_UP);
/*
 * SA callback for a full (receive-capable) join.  On success: finish
 * the join, immediately re-queue the join task to pick up the next
 * unjoined group, and — for the broadcast group — defer carrier-on to
 * the workqueue.  On failure: log (rate-limited, quietly for the
 * expected -ETIMEDOUT/-EAGAIN cases), clear BUSY, and re-queue the
 * join task with exponential backoff capped at
 * IPOIB_MAX_BACKOFF_SECONDS.
 */
335 static int ipoib_mcast_join_complete(int status,
336 struct ib_sa_multicast *multicast)
338 struct ipoib_mcast *mcast = multicast->context;
339 struct ipoib_dev_priv *priv = mcast->priv;
341 ipoib_dbg_mcast(priv, "join completion for %16D (status %d)\n",
342 mcast->mcmember.mgid.raw, ":", status);
344 /* We trap for port events ourselves. */
345 if (status == -ENETRESET)
349 status = ipoib_mcast_join_finish(mcast, &multicast->rec);
/* Success path: chain straight into the next join (0 delay). */
353 mutex_lock(&mcast_mutex);
354 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
355 queue_delayed_work(ipoib_workqueue,
356 &priv->mcast_task, 0);
357 mutex_unlock(&mcast_mutex);
360 * Defer carrier on work to ipoib_workqueue to avoid a
361 * deadlock on rtnl_lock here.
363 if (mcast == priv->broadcast)
364 queue_work(ipoib_workqueue, &priv->carrier_on_task);
369 if (mcast->logcount++ < 20) {
/* Timeouts/retries are routine; warn only on unexpected errors. */
370 if (status == -ETIMEDOUT || status == -EAGAIN) {
371 ipoib_dbg_mcast(priv, "multicast join failed for %16D, status %d\n",
372 mcast->mcmember.mgid.raw, ":", status);
374 ipoib_warn(priv, "multicast join failed for %16D, status %d\n",
375 mcast->mcmember.mgid.raw, ":", status);
380 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
381 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
383 /* Clear the busy flag so we try again */
384 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
386 mutex_lock(&mcast_mutex);
387 spin_lock_irq(&priv->lock);
388 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
389 queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
390 mcast->backoff * HZ);
391 spin_unlock_irq(&priv->lock);
392 mutex_unlock(&mcast_mutex);
/*
 * Kick off an asynchronous full join for one group.  Builds the SA
 * member record and component mask; the larger mask block (QKEY, MTU,
 * rate, SL, flow label, hop limit) copies those parameters from the
 * already-joined broadcast group so all groups share its settings.
 * NOTE(review): the "if (create)" branch selecting between the two
 * mask variants appears elided in this view — confirm against the
 * full file.  Failure re-arms the join task with exponential backoff.
 */
397 static void ipoib_mcast_join(struct ipoib_dev_priv *priv,
398 struct ipoib_mcast *mcast, int create)
400 struct ib_sa_mcmember_rec rec = {
403 ib_sa_comp_mask comp_mask;
406 ipoib_dbg_mcast(priv, "joining MGID %16D\n",
407 mcast->mcmember.mgid.raw, ":");
409 rec.mgid = mcast->mcmember.mgid;
410 rec.port_gid = priv->local_gid;
411 rec.pkey = cpu_to_be16(priv->pkey);
414 IB_SA_MCMEMBER_REC_MGID |
415 IB_SA_MCMEMBER_REC_PORT_GID |
416 IB_SA_MCMEMBER_REC_PKEY |
417 IB_SA_MCMEMBER_REC_JOIN_STATE;
421 IB_SA_MCMEMBER_REC_QKEY |
422 IB_SA_MCMEMBER_REC_MTU_SELECTOR |
423 IB_SA_MCMEMBER_REC_MTU |
424 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
425 IB_SA_MCMEMBER_REC_RATE_SELECTOR |
426 IB_SA_MCMEMBER_REC_RATE |
427 IB_SA_MCMEMBER_REC_SL |
428 IB_SA_MCMEMBER_REC_FLOW_LABEL |
429 IB_SA_MCMEMBER_REC_HOP_LIMIT;
/* Inherit transport parameters from the broadcast group's record. */
431 rec.qkey = priv->broadcast->mcmember.qkey;
432 rec.mtu_selector = IB_SA_EQ;
433 rec.mtu = priv->broadcast->mcmember.mtu;
434 rec.traffic_class = priv->broadcast->mcmember.traffic_class;
435 rec.rate_selector = IB_SA_EQ;
436 rec.rate = priv->broadcast->mcmember.rate;
437 rec.sl = priv->broadcast->mcmember.sl;
438 rec.flow_label = priv->broadcast->mcmember.flow_label;
439 rec.hop_limit = priv->broadcast->mcmember.hop_limit;
/* Mark the join in-flight before handing off to the SA client. */
442 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
443 mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
444 &rec, comp_mask, GFP_KERNEL,
445 ipoib_mcast_join_complete, mcast);
446 if (IS_ERR(mcast->mc)) {
447 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
448 ret = PTR_ERR(mcast->mc);
449 ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
452 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
453 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
455 mutex_lock(&mcast_mutex);
456 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
457 queue_delayed_work(ipoib_workqueue,
459 mcast->backoff * HZ);
460 mutex_unlock(&mcast_mutex);
/*
 * The delayed multicast join worker.  Each pass: refresh the local GID
 * and LID from the HCA, create/join the broadcast group if needed
 * (retrying after HZ on allocation failure), then find the next
 * unjoined, non-busy, non-send-only group on multicast_list and join
 * it (the completion callback re-queues this task for the next one).
 * When no unjoined group remains, derive mcast_mtu from the broadcast
 * group's MTU and clear IPOIB_MCAST_RUN.
 */
464 void ipoib_mcast_join_task(struct work_struct *work)
466 struct ipoib_dev_priv *priv =
467 container_of(work, struct ipoib_dev_priv, mcast_task.work);
468 struct ifnet *dev = priv->dev;
470 ipoib_dbg_mcast(priv, "Running join task. flags 0x%lX\n", priv->flags);
472 if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
475 if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
476 ipoib_warn(priv, "ib_query_gid() failed\n");
/* Keep the link-level address's GID portion (offset 4) in sync. */
478 memcpy(IF_LLADDR(dev) + 4, priv->local_gid.raw, sizeof (union ib_gid));
481 struct ib_port_attr attr;
483 if (!ib_query_port(priv->ca, priv->port, &attr))
484 priv->local_lid = attr.lid;
486 ipoib_warn(priv, "ib_query_port failed\n");
489 if (!priv->broadcast) {
490 struct ipoib_mcast *broadcast;
492 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
495 broadcast = ipoib_mcast_alloc(priv, 1);
497 ipoib_warn(priv, "failed to allocate broadcast group\n");
/* Allocation failed: retry the whole task after one second. */
498 mutex_lock(&mcast_mutex);
499 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
500 queue_delayed_work(ipoib_workqueue,
501 &priv->mcast_task, HZ);
502 mutex_unlock(&mcast_mutex);
506 spin_lock_irq(&priv->lock);
507 memcpy(broadcast->mcmember.mgid.raw, dev->if_broadcastaddr + 4,
508 sizeof (union ib_gid));
509 priv->broadcast = broadcast;
511 __ipoib_mcast_add(priv, priv->broadcast);
512 spin_unlock_irq(&priv->lock);
/* The broadcast group must be attached before any other join proceeds. */
515 if (priv->broadcast &&
516 !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
517 if (priv->broadcast &&
518 !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
519 ipoib_mcast_join(priv, priv->broadcast, 0);
524 struct ipoib_mcast *mcast = NULL;
526 spin_lock_irq(&priv->lock);
527 list_for_each_entry(mcast, &priv->multicast_list, list) {
528 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
529 && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
530 && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
531 /* Found the next unjoined group */
535 spin_unlock_irq(&priv->lock);
/* list_for_each_entry leaves mcast at the head sentinel when exhausted. */
537 if (&mcast->list == &priv->multicast_list) {
542 ipoib_mcast_join(priv, mcast, 1);
546 spin_lock_irq(&priv->lock);
548 priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
550 priv->mcast_mtu = priv->admin_mtu;
551 spin_unlock_irq(&priv->lock);
553 if (!ipoib_cm_admin_enabled(priv))
554 ipoib_change_mtu(priv, min(priv->mcast_mtu, priv->admin_mtu));
556 ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
558 clear_bit(IPOIB_MCAST_RUN, &priv->flags);
/*
 * Arm the multicast join machinery: set IPOIB_MCAST_RUN and queue the
 * join task immediately.  test_and_set makes repeated calls harmless
 * (the task is only queued on the 0 -> 1 transition).
 */
561 int ipoib_mcast_start_thread(struct ipoib_dev_priv *priv)
563 ipoib_dbg_mcast(priv, "starting multicast thread flags 0x%lX\n",
566 mutex_lock(&mcast_mutex);
567 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
568 queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
569 mutex_unlock(&mcast_mutex);
/*
 * Disarm the join machinery: clear IPOIB_MCAST_RUN and cancel any
 * pending join task.  When 'flush' is set the workqueue is drained so
 * an already-running task has finished before we return.
 * NOTE(review): the "if (flush)" guard line is elided in this view.
 */
574 int ipoib_mcast_stop_thread(struct ipoib_dev_priv *priv, int flush)
577 ipoib_dbg_mcast(priv, "stopping multicast thread\n");
579 mutex_lock(&mcast_mutex);
580 clear_bit(IPOIB_MCAST_RUN, &priv->flags);
581 cancel_delayed_work(&priv->mcast_task);
582 mutex_unlock(&mcast_mutex);
585 flush_workqueue(ipoib_workqueue);
/*
 * Leave one multicast group: free the in-flight SA membership if the
 * BUSY flag was set, and detach the QP if we were ATTACHED.  The
 * test_and_clear idioms make this safe to call on a group in any
 * join state.
 */
590 static int ipoib_mcast_leave(struct ipoib_dev_priv *priv, struct ipoib_mcast *mcast)
594 if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
595 ib_sa_free_multicast(mcast->mc);
597 if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
598 ipoib_dbg_mcast(priv, "leaving MGID %16D\n",
599 mcast->mcmember.mgid.raw, ":");
601 /* Remove ourselves from the multicast group */
602 ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
603 be16_to_cpu(mcast->mcmember.mlid));
605 ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
/*
 * Transmit one mbuf to a multicast MGID.  Drops (counting an output
 * error) if the device is not operational or the broadcast group is
 * not yet attached.  On a lookup miss a send-only group is created on
 * the fly; while its join is pending, up to IPOIB_MAX_MCAST_QUEUE
 * packets are parked on the group's pkt_queue.  Once an address
 * handle exists, the packet goes straight out via ipoib_send().
 */
612 ipoib_mcast_send(struct ipoib_dev_priv *priv, void *mgid, struct mbuf *mb)
614 struct ifnet *dev = priv->dev;
615 struct ipoib_mcast *mcast;
617 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
619 !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
620 if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
625 mcast = __ipoib_mcast_find(priv, mgid);
627 /* Let's create a new send only group now */
628 ipoib_dbg_mcast(priv, "setting up send only multicast group for %16D\n",
/* can_sleep = 0: we may be called from the transmit (atomic) path. */
631 mcast = ipoib_mcast_alloc(priv, 0);
633 ipoib_warn(priv, "unable to allocate memory for "
634 "multicast structure\n");
635 if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
640 set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
641 memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
642 __ipoib_mcast_add(priv, mcast);
643 list_add_tail(&mcast->list, &priv->multicast_list);
/* No AH yet: queue the packet behind the join, bounded by the cap. */
647 if (mcast->pkt_queue.ifq_len < IPOIB_MAX_MCAST_QUEUE) {
648 _IF_ENQUEUE(&mcast->pkt_queue, mb);
650 if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
654 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
655 ipoib_dbg_mcast(priv, "no address vector, "
656 "but multicast join already started\n");
657 else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
658 ipoib_mcast_sendonly_join(mcast);
661 * If lookup completes between here and out:, don't
662 * want to send packet twice.
668 if (mcast && mcast->ah)
669 ipoib_send(priv, mb, mcast->ah, IB_MULTICAST_QPN);
/*
 * Drop every multicast membership for the device.  Under priv->lock,
 * move all groups (including the broadcast group) onto a private
 * remove_list and clear priv->broadcast; then leave and free each one
 * outside the lock, since ipoib_mcast_leave() can sleep in the SA/verbs
 * calls.
 */
672 void ipoib_mcast_dev_flush(struct ipoib_dev_priv *priv)
674 LIST_HEAD(remove_list);
675 struct ipoib_mcast *mcast, *tmcast;
678 ipoib_dbg_mcast(priv, "flushing multicast list\n");
680 spin_lock_irqsave(&priv->lock, flags);
682 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
683 list_del(&mcast->list);
684 rb_erase(&mcast->rb_node, &priv->multicast_tree);
685 list_add_tail(&mcast->list, &remove_list);
688 if (priv->broadcast) {
689 rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
690 list_add_tail(&priv->broadcast->list, &remove_list);
691 priv->broadcast = NULL;
694 spin_unlock_irqrestore(&priv->lock, flags);
696 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
697 ipoib_mcast_leave(priv, mcast);
698 ipoib_mcast_free(mcast);
/*
 * Validate a 20-byte IPoIB link-level multicast address against the
 * device broadcast address: the length must be INFINIBAND_ALEN, bytes
 * 0-5 (reserved QPN + GID prefix/scope) and bytes 7-9 (signature low
 * byte + P_Key) must match the broadcast template.  Byte 6 is skipped
 * deliberately — it varies between valid multicast addresses.
 */
702 static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
705 if (addrlen != INFINIBAND_ALEN)
707 /* reserved QPN, prefix, scope */
708 if (memcmp(addr, broadcast, 6))
710 /* signature lower, pkey */
711 if (memcmp(addr + 7, broadcast + 7, 3))
/*
 * Workqueue trampoline: recover the device private from the embedded
 * restart_task work item and run the real restart logic.
 */
716 void ipoib_mcast_restart_task(struct work_struct *work)
718 struct ipoib_dev_priv *priv =
719 container_of(work, struct ipoib_dev_priv, restart_task);
720 ipoib_mcast_restart(priv);
/*
 * Resynchronize our multicast memberships with the interface's current
 * if_multiaddrs list.  The stack only gives us the full current set,
 * so we diff: clear every group's FOUND flag, walk the interface
 * addresses marking matches (creating entries for new groups and
 * replacing send-only entries with full ones), then move every group
 * left unFOUND onto remove_list.  Leaves/frees happen after dropping
 * the spinlock, and the join thread is restarted if the interface is
 * administratively up.
 */
723 void ipoib_mcast_restart(struct ipoib_dev_priv *priv)
725 struct ifnet *dev = priv->dev;
726 struct ifmultiaddr *ifma;
727 struct ipoib_mcast *mcast, *tmcast;
728 LIST_HEAD(remove_list);
729 struct ib_sa_mcmember_rec rec;
732 ipoib_dbg_mcast(priv, "restarting multicast task flags 0x%lX\n",
/* Quiesce the join task (flush=0) before rewriting the membership set. */
735 ipoib_mcast_stop_thread(priv, 0);
738 spin_lock(&priv->lock);
741 * Unfortunately, the networking core only gives us a list of all of
742 * the multicast hardware addresses. We need to figure out which ones
743 * are new and which ones have been removed
746 /* Clear out the found flag */
747 list_for_each_entry(mcast, &priv->multicast_list, list)
748 clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
750 /* Mark all of the entries that are found or don't exist */
753 TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
757 if (ifma->ifma_addr->sa_family != AF_LINK)
759 addr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
760 addrlen = ((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen;
761 if (!ipoib_mcast_addr_is_valid(addr, addrlen,
762 dev->if_broadcastaddr))
/* MGID lives at offset 4 of the 20-byte link-level address. */
765 memcpy(mgid.raw, addr + 4, sizeof mgid);
767 mcast = __ipoib_mcast_find(priv, &mgid);
768 if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
769 struct ipoib_mcast *nmcast;
771 /* ignore group which is directly joined by userspace */
772 if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
773 !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
774 ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %16D\n",
779 /* Not found or send-only group, let's add a new entry */
780 ipoib_dbg_mcast(priv, "adding multicast entry for mgid %16D\n",
783 nmcast = ipoib_mcast_alloc(priv, 0);
785 ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
789 set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);
791 nmcast->mcmember.mgid = mgid;
794 /* Destroy the send only entry */
795 list_move_tail(&mcast->list, &remove_list);
/* Swap the full entry into the tree slot the send-only entry held. */
797 rb_replace_node(&mcast->rb_node,
799 &priv->multicast_tree);
801 __ipoib_mcast_add(priv, nmcast);
803 list_add_tail(&nmcast->list, &priv->multicast_list);
807 set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
810 /* Remove all of the entries don't exist anymore */
811 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
812 if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
813 !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
814 ipoib_dbg_mcast(priv, "deleting multicast group %16D\n",
815 mcast->mcmember.mgid.raw, ":");
817 rb_erase(&mcast->rb_node, &priv->multicast_tree);
819 /* Move to the remove list */
820 list_move_tail(&mcast->list, &remove_list);
824 spin_unlock(&priv->lock);
825 if_maddr_runlock(dev);
827 /* We have to cancel outside of the spinlock */
828 list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
829 ipoib_mcast_leave(mcast->priv, mcast);
830 ipoib_mcast_free(mcast);
833 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
834 ipoib_mcast_start_thread(priv);
837 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
/*
 * Allocate a debug iterator positioned before the first group: the
 * MGID is zeroed so the first ipoib_mcast_iter_next() lands on the
 * smallest MGID in the tree.
 * NOTE(review): the NULL-check/cleanup branches are elided in this view.
 */
839 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct ipoib_dev_priv *priv)
841 struct ipoib_mcast_iter *iter;
843 iter = kmalloc(sizeof *iter, GFP_KERNEL);
848 memset(iter->mgid.raw, 0, 16);
850 if (ipoib_mcast_iter_next(iter)) {
/*
 * Advance the debug iterator: under priv->lock, walk the rb-tree from
 * its first node and stop at the first group whose MGID is strictly
 * greater than the iterator's current MGID, snapshotting that group's
 * state into the iterator.  Using the MGID as the cursor keeps the
 * walk safe even if the tree changed between calls.
 */
858 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
860 struct ipoib_dev_priv *priv = iter->priv;
862 struct ipoib_mcast *mcast;
865 spin_lock_irq(&priv->lock);
867 n = rb_first(&priv->multicast_tree);
870 mcast = rb_entry(n, struct ipoib_mcast, rb_node);
872 if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
873 sizeof (union ib_gid)) < 0) {
874 iter->mgid = mcast->mcmember.mgid;
875 iter->created = mcast->created;
876 iter->queuelen = mcast->pkt_queue.ifq_len;
877 iter->complete = !!mcast->ah;
878 iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
888 spin_unlock_irq(&priv->lock);
/*
 * Copy the iterator's current snapshot out through the caller's
 * pointers.  Pure accessor — reads only fields filled in by
 * ipoib_mcast_iter_next().
 */
893 void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
895 unsigned long *created,
896 unsigned int *queuelen,
897 unsigned int *complete,
898 unsigned int *send_only)
901 *created = iter->created;
902 *queuelen = iter->queuelen;
903 *complete = iter->complete;
904 *send_only = iter->send_only;
907 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */