/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"
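
/*
 * ib_multicast tracks multicast group membership on behalf of kernel
 * clients.  Groups are reference counted per port, join/leave requests
 * are sent to the subnet administrator (SA) through the ib_sa query
 * interface, and all work for a given group is serialized through a
 * single-threaded workqueue.
 */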
static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};

static struct ib_sa_client	sa_client;
static struct workqueue_struct	*mcast_wq;
static union ib_gid mgid0;
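
/*
 * mgid0 is the all-zero MGID.  Joining with it asks the SA to create a
 * group and assign an MGID, so acquire_group() inserts such groups into
 * the port's tree with duplicates allowed until the SA response arrives.
 */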

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[0];
};

enum mcast_state {
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_ERROR
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_BUSY,
	MCAST_GROUP_ERROR,
	MCAST_PKEY_EVENT
};

enum {
	MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[3];
	atomic_t		refcount;
	enum mcast_group_state	state;
	struct ib_sa_query	*query;
	int			query_id;
	u16			pkey_index;
	u8			leave_state;
	int			retries;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};
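
/*
 * Lifetime model: each member holds a reference on its group, and each
 * group holds a reference on its port.  The refcount/completion pairs
 * let ib_sa_free_multicast() and mcast_remove_one() block until all
 * outstanding callbacks have finished before freeing anything.
 */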

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);
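
/*
 * Look up a group by MGID in the port's red-black tree.  Must be called
 * with port->lock held.
 */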
static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has three types of members: full member, non member, and
 * send only member.  We need to keep track of the number of members of each
 * type based on their join state.  Adjust the number of members that belong
 * to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}
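
/*
 * Example: bit 0 of join_state is full member, bit 1 is non member, and
 * bit 2 is send-only member.  If members[] = {2, 0, 1}, the group still
 * has full and send-only members, so get_leave_state() returns 0x2
 * masked by rec.join_state, that is, the non member state is left only
 * if it was previously joined.
 */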

static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (ib_sa_check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
				 IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
				 src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (ib_sa_check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
				 IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
				 src->rate, dst->rate))
		return -EINVAL;
	if (ib_sa_check_selector(comp_mask,
				 IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
				 IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
				 dst->packet_life_time_selector,
				 src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;
	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	if (ret >= 0) {
		group->query_id = ret;
		ret = 0;
	}
	return ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}
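
/*
 * mcast_work_handler() drives the per-group state machine: it issues
 * pending joins one at a time, folds compatible requests into the
 * current membership, and sends any deferred leaves.  It runs from
 * mcast_wq and is also called directly by the SA query callbacks once
 * a join or leave completes.
 */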
static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active, i.e. at the head of the
 * pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		ib_find_pkey(group->port->dev->device, group->port->port_num,
			     be16_to_cpu(rec->pkey), &pkey_index);

		spin_lock_irq(&group->port->lock);
		group->rec = *rec;
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
			rb_erase(&group->node, &group->port->table);
			mcast_insert(group->port, group, 1);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && (group->retries > 0) &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}

static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback.  They
	 * could then free the multicast structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);

	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);
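
/*
 * Usage sketch ("my_join_done", "my_ctx", and the chosen comp_mask are
 * illustrative, not taken from an in-tree caller).  The client supplies
 * a callback that runs once the SA responds; returning nonzero from the
 * callback tells this module to free the multicast struct on the
 * caller's behalf:
 *
 *	static int my_join_done(int status, struct ib_sa_multicast *mc)
 *	{
 *		if (!status)
 *			pr_info("joined, mlid 0x%x\n",
 *				be16_to_cpu(mc->rec.mlid));
 *		return status;
 *	}
 *
 *	rec.join_state = 0x1;	(full member)
 *	mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *				  IB_SA_MCMEMBER_REC_MGID |
 *				  IB_SA_MCMEMBER_REC_PORT_GID |
 *				  IB_SA_MCMEMBER_REC_JOIN_STATE,
 *				  GFP_KERNEL, my_join_done, my_ctx);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 */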

void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	u8 p;

	ret = ib_find_cached_gid(device, &rec->port_gid, &p, &gid_index);
	if (ret)
		return ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->mlid);
	ah_attr->sl = rec->sl;
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.dgid = rec->mgid;

	ah_attr->grh.sgid_index = (u8) gid_index;
	ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
	ah_attr->grh.hop_limit = rec->hop_limit;
	ah_attr->grh.traffic_class = rec->traffic_class;
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
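
/*
 * A minimal sketch of the intended use for a UD QP (names such as "pd",
 * "qp", and "ah" are illustrative, not from this file): once the join
 * callback delivers the completed record, build an address handle and
 * attach the QP to the group.
 *
 *	struct ib_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_init_ah_from_mcmember(device, port_num, &mc->rec, &ah_attr))
 *		ah = ib_create_ah(pd, &ah_attr);
 *	ib_attach_mcast(qp, &mc->rec.mgid, be16_to_cpu(mc->rec.mlid));
 */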

static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
	    IB_LINK_LAYER_INFINIBAND)
		return;

	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}

static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;
	int count = 0;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		dev->start_port = dev->end_port = 0;
	else {
		dev->start_port = 1;
		dev->end_port = device->phys_port_cnt;
	}

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_port_get_link_layer(device, dev->start_port + i) !=
		    IB_LINK_LAYER_INFINIBAND)
			continue;
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
		++count;
	}

	if (!count) {
		kfree(dev);
		return;
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_port_get_link_layer(device, dev->start_port + i) ==
		    IB_LINK_LAYER_INFINIBAND) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
		}
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = create_singlethread_workqueue("ib_mcast");
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}