/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/string.h>

#include <asm/atomic.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

#define PFX	"ib_cm: "

/*
 * Limit CM message timeouts to something reasonable:
 * 8 seconds per message, with up to 15 retries
 */
static int max_timeout = 21;
module_param(max_timeout, int, 0644);
MODULE_PARM_DESC(max_timeout, "Maximum IB CM per message timeout "
			      "(default=21, or ~8 seconds)");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	struct cm_port *port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					&p, NULL)) {
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
			     &av->ah_attr);
	av->timeout = path->packet_life_time + 1;
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id, &id);
		if (!ret)
			next_id = ((unsigned) id + 1) & MAX_ID_MASK;
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return ret;
}

static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	int i;

	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
					     ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
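
/*
 * Usage sketch (illustrative, not part of the original file); the
 * handler name and context are hypothetical.  Note that a nonzero
 * return from the handler causes the CM to destroy the cm_id (see
 * cm_process_work() below).
 *
 *	static int my_cm_handler(struct ib_cm_id *cm_id,
 *				 struct ib_cm_event *event)
 *	{
 *		return 0;	(nonzero would destroy the cm_id)
 *	}
 *
 *	cm_id = ib_create_cm_id(device, my_cm_handler, my_context);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 */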

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
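
/*
 * Worked example (illustrative): with ca_ack_delay = 15 and
 * packet_life_time = 14, ack_timeout starts at 15; since ca_ack_delay
 * (15) >= ack_timeout - 1 (14), the result rounds up to 16, which
 * represents 4.096us * (2^15 + 2 * 2^14) = 4.096us * 2^16 exactly.
 * Results are clamped to 31, the largest encodable exponent.
 */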

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
	queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
			   msecs_to_jiffies(wait_time));
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		cm_id->state = IB_CM_IDLE;
		spin_unlock_irq(&cm_id_priv->lock);
		spin_lock_irq(&cm.lock);
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		break;
	case IB_CM_REQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->compare_data);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
		 struct ib_cm_compare_data *compare_data)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	unsigned long flags;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	if (compare_data) {
		cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
						   GFP_KERNEL);
		if (!cm_id_priv->compare_data)
			return -ENOMEM;
		cm_mask_copy(cm_id_priv->compare_data->data,
			     compare_data->data, compare_data->mask);
		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
		       IB_CM_COMPARE_SIZE);
	}

	cm_id->state = IB_CM_LISTEN;

	spin_lock_irqsave(&cm.lock, flags);
	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);
	spin_unlock_irqrestore(&cm.lock, flags);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		kfree(cm_id_priv->compare_data);
		cm_id_priv->compare_data = NULL;
		ret = -EBUSY;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
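
/*
 * Usage sketch (illustrative, not from the original file): listen on a
 * fixed service ID with an exact match; the service ID value is an
 * arbitrary example.  A zero service_mask is promoted to an exact-match
 * mask (~cpu_to_be64(0)), and a NULL compare_data disables private
 * data matching.
 *
 *	ret = ib_cm_listen(cm_id, cpu_to_be64(0x1234), 0, NULL);
 *
 * Passing IB_CM_ASSIGN_SERVICE_ID instead asks the CM to allocate a
 * service ID from cm.listen_service_id.
 */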

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_resp_res(req_msg, param->responder_resources);
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	if (param->remote_cm_response_timeout > (u8) max_timeout) {
		printk(KERN_WARNING PFX "req remote_cm_response_timeout %d > "
		       "%d, decreasing\n", param->remote_cm_response_timeout,
		       max_timeout);
		cm_req_set_remote_resp_timeout(req_msg, (u8) max_timeout);
	}
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	if (param->local_cm_response_timeout > (u8) max_timeout) {
		printk(KERN_WARNING PFX "req local_cm_response_timeout %d > "
		       "%d, decreasing\n", param->local_cm_response_timeout,
		       max_timeout);
		cm_req_set_local_resp_timeout(req_msg, (u8) max_timeout);
	}
	cm_req_set_retry_count(req_msg, param->retry_count);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
	cm_req_set_srq(req_msg, param->srq);

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	if (cm_id_priv->timeout_ms > cm_convert_to_ms(max_timeout)) {
		printk(KERN_WARNING PFX "req timeout_ms %d > %d, decreasing\n",
		       cm_id_priv->timeout_ms, cm_convert_to_ms(max_timeout));
		cm_id_priv->timeout_ms = cm_convert_to_ms(max_timeout);
	}
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
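
/*
 * Usage sketch (illustrative): the caller fills an ib_cm_req_param
 * with at least a primary path and QP attributes before sending; all
 * field values below are arbitrary examples.
 *
 *	struct ib_cm_req_param param = {
 *		.primary_path		     = &path_rec,
 *		.service_id		     = cpu_to_be64(0x1234),
 *		.qp_num			     = qp->qp_num,
 *		.qp_type		     = IB_QPT_RC,
 *		.responder_resources	     = 4,
 *		.initiator_depth	     = 4,
 *		.remote_cm_response_timeout = 20,
 *		.local_cm_response_timeout  = 20,
 *		.retry_count		     = 7,
 *		.rnr_retry_count	     = 7,
 *		.max_cm_retries		     = 15,
 *	};
 *
 *	ret = ib_send_cm_req(cm_id, &param);
 */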

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
		cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
		cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);

	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->ib_device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	if (cm_req_get_local_resp_timeout(req_msg) > (u8) max_timeout) {
		printk(KERN_WARNING PFX "rcvd cm_local_resp_timeout %d > %d, "
		       "decreasing used timeout_ms\n",
		       cm_req_get_local_resp_timeout(req_msg), max_timeout);
		cm_id_priv->timeout_ms = cm_convert_to_ms(max_timeout);
	}

	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	rep_msg->initiator_depth = param->initiator_depth;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	cm_rep_set_srq(rep_msg, param->srq);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

static void cm_format_rep_event(struct cm_work *work)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	cm_id_priv->av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	/* todo: handle peer_to_peer */

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
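/*
 * Handle a received DREQ.  A DREQ that misses the local cm_id table is
 * answered with an unsolicited DREP so the remote peer can finish its
 * disconnect; a DREQ arriving in TIMEWAIT is treated as a retransmission
 * and answered with the previously saved private data.
 */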
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
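/*
 * Reject a connection in progress.  Rejecting a received REQ or REP
 * resets the cm_id to idle; rejecting after our own REP was sent moves
 * the connection into timewait.
 */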
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irq(&cm.lock);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irq(&cm.lock);
			return NULL;
		}
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irq(&cm.lock);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return ret;
}
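/*
 * Send an MRA (message receipt acknowledgment) to extend the remote
 * CM's timeout while this side processes a REQ, REP, or LAP.  With
 * IB_CM_MRA_FLAG_DELAY set only the local state is updated, and the MRA
 * itself is sent later, only if a duplicate message arrives.
 */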
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
	default:
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}
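/*
 * Handle a received MRA: extend the timeout of the outstanding REQ,
 * REP, or LAP by rescheduling its MAD, clamping the result to
 * max_timeout.
 */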
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);
	if (timeout > cm_convert_to_ms(max_timeout)) {
		printk(KERN_WARNING PFX "calculated mra timeout %d > %d, "
		       "decreasing used timeout_ms\n", timeout,
		       cm_convert_to_ms(max_timeout));
		timeout = cm_convert_to_ms(max_timeout);
	}

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		/* fall through */
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	cm_lap_set_local_ack_timeout(lap_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       alternate_path->packet_life_time));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_UNINIT &&
	     cm_id->lap_state != IB_CM_LAP_IDLE)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
	if (ret)
		goto out;
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
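/*
 * Convert the alternate path carried in a LAP message into an
 * ib_sa_path_rec from the receiving side's point of view, so local and
 * remote fields swap direction.
 */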
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct ib_sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	memset(path, 0, sizeof *path);
	path->dgid = lap_msg->alt_local_gid;
	path->sgid = lap_msg->alt_remote_gid;
	path->dlid = lap_msg->alt_local_lid;
	path->slid = lap_msg->alt_remote_lid;
	path->flow_label = cm_lap_get_flow_label(lap_msg);
	path->hop_limit = lap_msg->alt_hop_limit;
	path->traffic_class = cm_lap_get_traffic_class(lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = cm_lap_get_sl(lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = cm_lap_get_packet_rate(lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
}
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
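/* Build an APR (alternate path response) MAD answering a received LAP. */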
static void cm_format_apr(struct cm_apr_msg *apr_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_apr_status status,
			  void *info,
			  u8 info_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
	apr_msg->local_comm_id = cm_id_priv->id.local_id;
	apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
	apr_msg->ap_status = (u8) status;

	if (info && info_length) {
		apr_msg->info_length = info_length;
		memcpy(apr_msg->info, info, info_length);
	}

	if (private_data && private_data_len)
		memcpy(apr_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
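/*
 * A timewait work item has fired: if the cm_id is still in TIMEWAIT for
 * the same remote QPN, move it to IDLE and report IB_CM_TIMEWAIT_EXIT.
 */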
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	int ret;

	timewait_info = (struct cm_timewait_info *)work;
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
	sidr_req_msg->request_id = cm_id_priv->id.local_id;
	sidr_req_msg->pkey = param->path->pkey;
	sidr_req_msg->service_id = param->service_id;

	if (param->private_data && param->private_data_len)
		memcpy(sidr_req_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = param->timeout_ms;
	if (cm_id_priv->timeout_ms > cm_convert_to_ms(max_timeout)) {
		printk(KERN_WARNING PFX "sidr req timeout_ms %d > %d, "
		       "decreasing used timeout_ms\n", param->timeout_ms,
		       cm_convert_to_ms(max_timeout));
		cm_id_priv->timeout_ms = cm_convert_to_ms(max_timeout);
	}
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
	param->listen_id = listen_id;
	param->port = work->port->port_num;
	work->cm_event.private_data = &sidr_req_msg->private_data;
}
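/*
 * Handle a received SIDR REQ: create a new cm_id for the query, drop
 * duplicates, and hand the event to the matching service listener, or
 * reject with IB_SIDR_UNSUPPORTED if no one is listening.
 */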
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irq(&cm.lock);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cur_cm_id_priv = cm_find_listen(cm_id->device,
					sidr_req_msg->service_id,
					sidr_req_msg->private_data);
	if (!cur_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
		goto out; /* No match. */
	}
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
	sidr_rep_msg->status = param->status;
	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
	sidr_rep_msg->service_id = cm_id_priv->id.service_id;
	sidr_rep_msg->qkey = cpu_to_be32(param->qkey);

	if (param->info && param->info_length)
		memcpy(sidr_rep_msg->info, param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		memcpy(sidr_rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
static void cm_format_sidr_rep_event(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = sidr_rep_msg->status;
	param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
	param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
	param->info = &sidr_rep_msg->info;
	param->info_len = sidr_rep_msg->info_length;
	work->cm_event.private_data = &sidr_rep_msg->private_data;
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
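/*
 * A sent MAD completed in error.  Map the failure onto the matching
 * *_ERROR CM event for whatever message was outstanding, unless the
 * send has already been superseded by a state change.
 */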
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}
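/* Send completion handler for all CM MADs; also updates the tx counters. */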
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;
	queue_delayed_work(cm.wq, &work->work, 0);
out:
	return ret;
}
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
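/*
 * Receive handler: map the MAD attribute ID to a CM event, bump the rx
 * counter, and queue a work item; REQ and LAP messages reserve space
 * for the path records they carry.
 */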
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	u16 attr_id;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;
	queue_delayed_work(cm.wq, &work->work, 0);
}
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (cm_id_priv->alt_av.ah_attr.dlid) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:

	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			if (cm_id_priv->qp_type == IB_QPT_RC) {
				*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->timeout = cm_id_priv->av.timeout;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
					cm_id_priv->initiator_depth;
			}
			if (cm_id_priv->alt_av.ah_attr.dlid) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
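/* Cache the CA's local ack delay, used when computing ack timeouts. */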
static void cm_get_ack_delay(struct cm_device *cm_dev)
{
	struct ib_device_attr attr;

	if (ib_query_device(cm_dev->ib_device, &attr))
		cm_dev->ack_delay = 0; /* acks will rely on packet life time */
	else
		cm_dev->ack_delay = attr.local_ca_ack_delay;
}
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static void cm_release_port_obj(struct kobject *obj)
{
	struct cm_port *cm_port;

	cm_port = container_of(obj, struct cm_port, port_obj);
	kfree(cm_port);
}

static struct kobj_type cm_port_obj_type = {
	.release = cm_release_port_obj
};

struct class cm_class = {
	.name    = "infiniband_cm",
};
EXPORT_SYMBOL(cm_class);
static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
				   &port->cm_dev->device->kobj,
				   "%d", port->port_num);
	if (ret) {
		kfree(port);
		return ret;
	}

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = kobject_init_and_add(&port->counter_group[i].obj,
					   &cm_counter_obj_type,
					   &port->port_obj,
					   "%s", counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		kobject_put(&port->counter_group[i].obj);
	kobject_put(&port->port_obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		kobject_put(&port->counter_group[i].obj);

	kobject_put(&port->port_obj);
}
static void cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
			 ib_device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->ib_device = ib_device;
	cm_get_ack_delay(cm_dev);

	cm_dev->device = device_create(&cm_class, &ib_device->dev,
				       MKDEV(0, 0), NULL,
				       "%s", ib_device->name);
	if (!cm_dev->device) {
		kfree(cm_dev);
		return;
	}

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port)
			goto error1;

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI, &reg_req,
							0, cm_send_handler,
							cm_recv_handler, port);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;
	}
	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
	device_unregister(cm_dev->device);
	kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	cm_dev = ib_get_client_data(ib_device, &cm_client);
	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		flush_workqueue(cm.wq);
		cm_remove_port_fs(port);
	}
	device_unregister(cm_dev->device);
	kfree(cm_dev);
}
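/*
 * Module init: set up the global cm tables and locks, register the
 * sysfs class, create the CM workqueue, and register as an IB client.
 */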
static int __init ib_cm_init(void)
{
	int ret;

	memset(&cm, 0, sizeof cm);
	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	idr_init(&cm.local_id_table);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	idr_pre_get(&cm.local_id_table, GFP_KERNEL);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret)
		return -ENOMEM;

	cm.wq = create_workqueue("ib_cm");
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error1;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error2;

	return 0;
error2:
	destroy_workqueue(cm.wq);
error1:
	class_unregister(&cm_class);
	return ret;
}
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	idr_destroy(&cm.local_id_table);
}

module_init_order(ib_cm_init, SI_ORDER_SECOND);
module_exit_order(ib_cm_cleanup, SI_ORDER_FIRST);