/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/delay.h>

#include "mlx4_ib.h"
#define MAX_PEND_REQS_PER_FUNC 4
#define MAD_TIMEOUT_MS	2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, group->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err(" %16s: " format, (group)->name, ## arg)

static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;
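/*
 * mgid0 is never written, so it stays the all-zero MGID; it serves as the
 * sentinel a VF sends when it wants the SM to assign an MGID (see
 * search_relocate_mgid0_group() below).  clean_wq is the module-global
 * workqueue used to defer per-port cleanup when it cannot be run
 * synchronously (see mlx4_ib_mcg_port_cleanup()).
 */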
enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state	state;
	u8			join_state;
	int			num_pend_reqs;
	struct list_head	pending;
};

struct ib_sa_mcmember_data {
	union ib_gid	mgid;
	union ib_gid	port_gid;
	__be32		qkey;
	__be16		mlid;
	u8		mtusel_mtu;
	u8		tclass;
	__be16		pkey;
	u8		ratesel_rate;
	u8		lifetmsel_lifetm;
	__be32		sl_flowlabel_hoplimit;
	u8		scope_join_state;
	u8		proxy_join;
};

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node		node;
	struct list_head	mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member	func[MAX_VFS];
	struct mutex		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	int			members[3];
	enum mcast_group_state	state;
	enum mcast_group_state	prev_state;
	struct ib_sa_mad	response_sa_mad;
	__be64			last_req_tid;
	char			name[33]; /* MGID string */
	struct device_attribute	dentry;

	/* refcount is the reference count for the following:
	     1. Each queued request
	     2. Each invocation of the worker thread
	     3. Membership of the port at the SA */
	atomic_t		refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work	timeout_work;
	struct list_head	cleanup_list;
};

struct mcast_req {
	int			func;
	struct ib_sa_mad	sa_mad;
	struct list_head	group_list;
	struct list_head	func_list;
	struct mcast_group	*group;
	int			clean;
};
#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)
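/*
 * safe_atomic_dec() drops a group reference that must never be the last
 * one.  The typical caller has just taken an extra reference "for
 * scheduling the work" and gives it back when queue_work() reports that
 * the work item was already pending; hitting zero here would mean a
 * refcounting bug, hence the warning.
 */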
static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;
		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);
		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (!ret)
			return cur_group;
		if (ret < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}
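/*
 * The per-port group table (ctx->mcg_table) is an rb-tree keyed by a
 * memcmp() over the 16-byte MGID and protected by ctx->mcg_table_lock.
 * mcast_insert() returns NULL on success and the already-present group on
 * a duplicate key, which acquire_group() below treats as a "group just
 * showed up" race.
 */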
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr ah_attr;

	spin_lock(&dev->sm_lock);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock(&dev->sm_lock);
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock(&dev->sm_lock);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
				    IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad);

static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid;  /* opensm lid */

	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;

	/* we rely on the MAD request exactly as it arrived from the VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
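/*
 * Join requests are proxied rather than forwarded: the VF's MCMemberRecord
 * is reused almost verbatim, but the port GID is rewritten to the PF's
 * port GID (guid_cache[0]) and the TID is replaced with one owned by the
 * demux context, so the SM's reply can be matched against last_req_tid and
 * answered on the VF's behalf.  If no reply arrives within MAD_TIMEOUT_MS,
 * the delayed timeout_work unwinds the state machine.
 */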
static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
		group->state = MCAST_IDLE;

	/* set timeout handler */
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
	u8 selector = dst_value >> 6;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		err = (src_value <= dst_value);
		err = (src_value >= dst_value);
		err = (src_value != dst_value);
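/*
 * The top two bits of each "selector + value" byte (mtusel_mtu,
 * ratesel_rate, lifetmsel_lifetm) carry the IBTA selector (greater-than,
 * less-than or exactly); the low six bits carry the value itself.  For
 * example, a requested mtusel_mtu of 0x84 means selector 2 ("exactly")
 * with MTU code 4 (2048 bytes): 0x84 >> 6 == 2 and 0x84 & 0x3f == 4.
 */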
static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
	/* src is the group record, dst is the request record */
	/* MGID must already match */
	/* Port_GID is always replaced with our own Port_GID, so it is a match */
#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) !=
	    (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */
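/*
 * sl_flowlabel_hoplimit packs three MCMemberRecord fields into a single
 * __be32, as the masks above show: SL in bits 31:28, FlowLabel in bits
 * 27:8 and HopLimit in bits 7:0.  Likewise scope_join_state keeps Scope in
 * the high nibble and JoinState in the low nibble.
 */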
/* release group, return 1 if this was the last release and the group is destroyed.
 * timeout work is canceled synchronously */
static int release_group(struct mcast_group *group, int from_timeout_handler)
	struct mlx4_ib_demux_ctx *ctx = group->demux;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non empty pending list\n");

		rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);

	mutex_unlock(&group->lock);
	mutex_unlock(&ctx->mcg_table_lock);
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;

static u8 get_leave_state(struct mcast_group *group)
	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 7);
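/*
 * members[0..2] count how many functions hold each JoinState bit for this
 * group: bit 0 full member, bit 1 non member, bit 2 send-only non member.
 * get_leave_state() returns those of the port's currently advertised bits
 * (low bits of rec.scope_join_state) that no function needs any more.
 * For example, members[] = {0, 2, 0} with (scope_join_state & 7) == 3
 * yields 0x1: full membership can be dropped, non membership must stay.
 */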
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;

static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;

static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

		return MAD_STATUS_REQ_INVALID;
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
			mutex_lock(&group->lock);
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 7)
			group->rec.scope_join_state &= 0xf8;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
		mutex_lock(&group->lock);
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
		group->state = MCAST_IDLE;
		atomic_inc(&group->refcount);
		if (!queue_work(group->demux->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
		leave_group(group, req->func, leave_mask);

		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);

static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
	u8 group_join_state = group->rec.scope_join_state & 7;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);

		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			group->state = group->prev_state;
			group->state = MCAST_JOIN_SENT;
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	int rc = 1; /* release_count - this is for the scheduled work */

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from the SM is waiting for this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If the VF is
	 * waiting and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				       (long long unsigned int)be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
				       (long long unsigned int)be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;

			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						       struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)

			resp_join_state = ((struct ib_sa_mcmember_data *)
					   group->response_sa_mad.data)->scope_join_state & 7;
			cur_join_state = group->rec.scope_join_state & 7;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
			} else if (!resp_join_state)
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);

		group->state = MCAST_IDLE;

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0x7;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);

	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				group->state = MCAST_LEAVE_SENT;

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

		release_group(group, 0);
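/*
 * Roughly, the group state machine driven by this handler is:
 *
 *   MCAST_IDLE --send_join_to_wire()--->  MCAST_JOIN_SENT
 *   MCAST_IDLE --send_leave_to_wire()-->  MCAST_LEAVE_SENT
 *   *_SENT ----SA response (demux)----->  MCAST_RESP_READY
 *   MCAST_RESP_READY --this handler---->  MCAST_IDLE
 *   *_SENT ----MAD_TIMEOUT_MS expires-->  MCAST_IDLE (timeout handler)
 *
 * Each queued request, each scheduled work item and the port's own SA
 * membership hold one group reference; rc counts how many of them this
 * pass has retired and release_group() is called that many times at the
 * end of the function.
 */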
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
							__be64 tid,
							union ib_gid *new_mgid)
	struct mcast_group *group = NULL, *cur_group;
	struct mcast_req *req;
	struct list_head *pos;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
		group = list_entry(pos, struct mcast_group, mgid0_list);
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
					(long long unsigned int)be64_to_cpu(group->rec.mgid.global.subnet_prefix),
					(long long unsigned int)be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);

				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);

				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);

		mutex_unlock(&group->lock);

	mutex_unlock(&ctx->mcg_table_lock);
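/*
 * MGID 0 handling: a VF may join with an all-zero MGID and let the SM
 * allocate one.  Such a group cannot be keyed into the rb-tree yet, so it
 * is parked on ctx->mcg_mgid0_list.  When the SM's GET_RESP arrives, the
 * TID (with the slave id stored in its first byte) identifies the parked
 * group, the newly assigned MGID is copied into group->rec and the group
 * is moved into the rb-tree, unless an identical MGID raced in first, in
 * which case the new group is silently dropped.
 */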
static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf);

static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
	struct mcast_group *group, *cur_group;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);

	group = mcast_find(ctx, mgid);
			return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
		return ERR_PTR(-ENOMEM);

	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
		(long long unsigned int)be64_to_cpu(group->rec.mgid.global.subnet_prefix),
		(long long unsigned int)be64_to_cpu(group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);

	cur_group = mcast_insert(ctx, group);
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		return ERR_PTR(-EINVAL);

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

	atomic_inc(&group->refcount);
static void queue_req(struct mcast_req *req)
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);

			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, mad->mad_hdr.method);
		return 1; /* consumed */
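/*
 * In effect, demux (wire to host) and multiplex (VF to wire) are
 * asymmetric: the demux handler consumes SA responses for groups this file
 * is proxying and lets table queries pass through to the guest, while the
 * multiplex handler consumes SET/DELETE requests coming from VFs (turning
 * them into queued mcast_reqs) and passes everything else to the wire
 * untouched.
 */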
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);

		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
			return PTR_ERR(group);

		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
				       port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);

		++group->func[slave].num_pend_reqs;

		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf)
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
		sprintf(state_str, "%s(TID=0x%llx)",
			get_state_string(group->state),
			(long long unsigned int)be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
			(long long unsigned int)be64_to_cpu(req->sa_mad.mad_hdr.tid));
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
		       group->rec.scope_join_state & 0xf,
		       group->members[2], group->members[1], group->members[0],
		       atomic_read(&group->refcount),
	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
				       f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
		       "%4x %4x %2x %2x)\n",
		       be16_to_cpu(group->rec.pkey),
		       be32_to_cpu(group->rec.qkey),
		       (group->rec.mtusel_mtu & 0xc0) >> 6,
		       group->rec.mtusel_mtu & 0x3f,
		       (group->rec.ratesel_rate & 0xc0) >> 6,
		       group->rec.ratesel_rate & 0x3f,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
		       be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
		       group->rec.proxy_join);
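/*
 * One sysfs attribute file is created per group (named after the MGID,
 * mode 0400); reading it returns a single line with the group's
 * scope/join state, the three membership counters, its refcount, whether
 * a request is pending, the state-machine state, the per-function join
 * states and, in parentheses, the raw MCMemberRecord fields.
 */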
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = create_singlethread_workqueue(name);

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
static void force_clean_group(struct mcast_group *group)
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);

	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
	struct mcast_group *group;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);

		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
		mutex_unlock(&ctx->mcg_table_lock);

	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n",
				       atomic_read(&group->refcount), group);

		force_clean_group(group);
	mutex_unlock(&ctx->mcg_table_lock);
struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};

static void mcg_clean_task(struct work_struct *work)
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;

void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
	struct clean_work *work;

		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);

	work = kmalloc(sizeof *work, GFP_KERNEL);
		mcg_warn("failed allocating work for cleanup\n");

	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
static void build_leave_mad(struct mcast_req *req)
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;

static void clear_pending_reqs(struct mcast_group *group, int vf)
	struct mcast_req *req, *tmp, *group_first = NULL;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			group->state = MCAST_IDLE;

			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);

			atomic_dec(&group->refcount);

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
static int push_deleteing_req(struct mcast_group *group, int slave)
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)

	req = kzalloc(sizeof *req, GFP_KERNEL);
		mcg_warn_group(group, "failed allocation - may leave stale groups\n");

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
		if (pend_req->clean) {

	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
	struct mcast_group *group;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleteing_req(group, slave);
		mutex_unlock(&group->lock);
	mutex_unlock(&ctx->mcg_table_lock);
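/*
 * VF teardown path: for every group the departing slave touched,
 * clear_pending_reqs() first cancels its still-queued requests, and, if
 * the slave still holds any join state, push_deleteing_req() queues one
 * synthetic DELETE request so the normal work handler sends the leave to
 * the SM and fixes up the membership counters.
 */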
int mlx4_ib_mcg_init(void)
	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");

void mlx4_ib_mcg_destroy(void)
	destroy_workqueue(clean_wq);