/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <linux/atomic.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Synchronize changes to the CM port state */
	spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};
static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port MAD agent is registered and the av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;
};
static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* don't let the port be released until the agent is down */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: invalid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the MAD agent yet */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}
static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
			   grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;
	struct net_device *ndev = ib_get_ndev_from_path(path);

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					path->gid_type, ndev, &p, NULL)) {
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (ndev)
		dev_put(ndev);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ret = ib_init_ah_from_path(cm_dev->ib_device, port->port_num,
				   path, &av->ah_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;

	spin_lock_irqsave(&cm.lock, flags);
	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&cm.lock, flags);

	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&cm.lock, flags);
	idr_preload_end();

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return id < 0 ? id : 0;
}
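
/*
 * Example: an idr value of 3 combined with a random_id_operand of
 * 0x12345678 yields the on-the-wire local_id 0x1234567b; cm_free_id()
 * and cm_get_id() apply the same XOR to map a local_id back to idr
 * slot 3.  The XOR hides the small sequential idr values without
 * affecting uniqueness.
 */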
static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}
/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}
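
/*
 * Example: on a little-endian host, be32_lt(cpu_to_be32(1), cpu_to_be32(256))
 * is false even though 1 < 256, because the raw stored values compare as
 * 0x01000000 and 0x00010000.  Every caller sees the same raw-value order,
 * which is all the rb-tree keying below requires.
 */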
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;

			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
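
/*
 * Example usage (sketch only; "my_cm_handler" and "my_ctx" stand in for a
 * consumer's callback and context, which are not defined in this file):
 *
 *	struct ib_cm_id *cm_id;
 *
 *	cm_id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 */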
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
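
/*
 * Example: an IBA time of 12 converts to 1 << (12 - 8) = 16 ms, close to
 * the exact 4.096us * 2^12 ~= 16.8 ms; any value <= 8 rounds to 1 ms.
 */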
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
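
/*
 * Example: ca_ack_delay = 15 and packet_life_time = 14 give a tentative
 * ack_timeout of 15; since ca_ack_delay (15) is within one of it, the
 * result rounds up to 16, then is clamped to the 5-bit maximum of 31.
 */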
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	spin_lock_irq(&cm.lock);
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	spin_unlock_irq(&cm.lock);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
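
/*
 * Example usage (sketch; the service ID 0x10000CE1 is arbitrary and used
 * only for illustration):
 *
 *	ret = ib_cm_listen(cm_id, cpu_to_be64(0x10000CE1ULL), 0);
 *
 * A zero service_mask requests an exact match on the service ID.
 */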
/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;
	unsigned long flags;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id = ib_create_cm_id(device, cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		goto new_id;

	/* Find an existing ID */
	cm_id_priv = cm_find_listen(device, service_id);
	if (cm_id_priv) {
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
			/* Sharing an ib_cm_id with different handlers is not
			 * supported */
			spin_unlock_irqrestore(&cm.lock, flags);
			return ERR_PTR(-EINVAL);
		}
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;
		return cm_id;
	}

new_id:
	/* Use newly created ID */
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);

	if (err) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);
	}
	return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);
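
/*
 * Example usage (sketch; "my_cm_handler" is a consumer-supplied callback,
 * and the service ID is arbitrary):
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_cm_insert_listen(device, my_cm_handler,
 *				 cpu_to_be64(0x10000CE1ULL));
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	...
 *	ib_destroy_cm_id(id);
 */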
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
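
/*
 * Example: a MAD agent hi_tid of 0x5 and a local_id of 0x1234 produce the
 * TID 0x0000000500001234 for a message-sequence value of zero, so replies
 * can be demultiplexed back to both the sending agent and the originating
 * cm_id.
 */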
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
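
/*
 * Example usage (sketch; "path" is a previously resolved ib_sa_path_rec
 * and "qp" a caller-owned RC queue pair; the numeric values shown are
 * arbitrary):
 *
 *	struct ib_cm_req_param req = {
 *		.primary_path		= &path,
 *		.service_id		= cpu_to_be64(0x10000CE1ULL),
 *		.qp_num			= qp->qp_num,
 *		.qp_type		= qp->qp_type,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *
 *	ret = ib_send_cm_req(cm_id, &req);
 */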
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;
	}
}
static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);

	return listen_cm_id_priv;
}
/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
	work->path[0].hop_limit = cm_id_priv->av.ah_attr.grh.hop_limit;
	ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
				work->port->port_num,
				cm_id_priv->av.ah_attr.grh.sgid_index,
				&gid, &gid_attr);
	if (!ret) {
		if (gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
					 cm_id_priv);
	}
	if (ret) {
		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
					    work->port->port_num, 0,
					    &work->path[0].sgid,
					    &gid_attr);
		if (!err && gid_attr.ndev) {
			work->path[0].ifindex = gid_attr.ndev->ifindex;
			work->path[0].net = dev_net(gid_attr.ndev);
			dev_put(gid_attr.ndev);
		}
		work->path[0].gid_type = gid_attr.gid_type;
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
					 cm_id_priv);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		rep_msg->initiator_depth = param->initiator_depth;
		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
		cm_rep_set_srq(rep_msg, param->srq);
		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	} else {
		cm_rep_set_srq(rep_msg, 1);
		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
	}

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
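
/*
 * Example usage (sketch; typically issued from a consumer's CM handler
 * after creating a QP for the new connection, with "req" being the
 * ib_cm_req_event_param delivered with the REQ event):
 *
 *	struct ib_cm_rep_param rep = {
 *		.qp_num			= qp->qp_num,
 *		.starting_psn		= qp->qp_num,
 *		.responder_resources	= req->responder_resources,
 *		.initiator_depth	= req->initiator_depth,
 *	};
 *
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */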
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
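
/*
 * Example usage (sketch): the active side typically completes the handshake
 * from its CM event handler when the REP arrives, with no private data:
 *
 *	case IB_CM_REP_RECEIVED:
 *		ret = ib_send_cm_rtu(cm_id, NULL, 0);
 *		break;
 */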
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
1979 static int cm_rep_handler(struct cm_work *work)
1981 struct cm_id_private *cm_id_priv;
1982 struct cm_rep_msg *rep_msg;
1985 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1986 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1988 cm_dup_rep_handler(work);
1992 cm_format_rep_event(work, cm_id_priv->qp_type);
1994 spin_lock_irq(&cm_id_priv->lock);
1995 switch (cm_id_priv->id.state) {
1996 case IB_CM_REQ_SENT:
1997 case IB_CM_MRA_REQ_RCVD:
2000 spin_unlock_irq(&cm_id_priv->lock);
2005 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
2006 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2007 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2009 spin_lock(&cm.lock);
2010 /* Check for duplicate REP. */
2011 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2012 spin_unlock(&cm.lock);
2013 spin_unlock_irq(&cm_id_priv->lock);
2017 /* Check for a stale connection. */
2018 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
2019 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2020 &cm.remote_id_table);
2021 cm_id_priv->timewait_info->inserted_remote_id = 0;
2022 spin_unlock(&cm.lock);
2023 spin_unlock_irq(&cm_id_priv->lock);
2024 cm_issue_rej(work->port, work->mad_recv_wc,
2025 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2030 spin_unlock(&cm.lock);
2032 cm_id_priv->id.state = IB_CM_REP_RCVD;
2033 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2034 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2035 cm_id_priv->initiator_depth = rep_msg->resp_resources;
2036 cm_id_priv->responder_resources = rep_msg->initiator_depth;
2037 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2038 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2039 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2040 cm_id_priv->av.timeout =
2041 cm_ack_timeout(cm_id_priv->target_ack_delay,
2042 cm_id_priv->av.timeout - 1);
2043 cm_id_priv->alt_av.timeout =
2044 cm_ack_timeout(cm_id_priv->target_ack_delay,
2045 cm_id_priv->alt_av.timeout - 1);
2047 /* todo: handle peer_to_peer */
2049 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2050 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2051 if (!ret)
2052 list_add_tail(&work->list, &cm_id_priv->work_list);
2053 spin_unlock_irq(&cm_id_priv->lock);
2055 if (ret)
2056 cm_process_work(cm_id_priv, work);
2057 else
2058 cm_deref_id(cm_id_priv);
2059 return 0;
2061 error:
2062 cm_deref_id(cm_id_priv);
2063 return ret;
2066 static int cm_establish_handler(struct cm_work *work)
2068 struct cm_id_private *cm_id_priv;
2071 /* See comment in cm_establish about lookup. */
2072 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2073 if (!cm_id_priv)
2074 return -EINVAL;
2076 spin_lock_irq(&cm_id_priv->lock);
2077 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2078 spin_unlock_irq(&cm_id_priv->lock);
2079 goto out;
2080 }
2082 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2083 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2084 if (!ret)
2085 list_add_tail(&work->list, &cm_id_priv->work_list);
2086 spin_unlock_irq(&cm_id_priv->lock);
2088 if (ret)
2089 cm_process_work(cm_id_priv, work);
2090 else
2091 cm_deref_id(cm_id_priv);
2092 return 0;
2093 out:
2094 cm_deref_id(cm_id_priv);
2095 return -EINVAL;
2098 static int cm_rtu_handler(struct cm_work *work)
2100 struct cm_id_private *cm_id_priv;
2101 struct cm_rtu_msg *rtu_msg;
2104 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2105 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2106 rtu_msg->local_comm_id);
2107 if (!cm_id_priv)
2108 return -EINVAL;
2110 work->cm_event.private_data = &rtu_msg->private_data;
2112 spin_lock_irq(&cm_id_priv->lock);
2113 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2114 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2115 spin_unlock_irq(&cm_id_priv->lock);
2116 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2117 counter[CM_RTU_COUNTER]);
2118 goto out;
2119 }
2120 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2122 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2123 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2124 if (!ret)
2125 list_add_tail(&work->list, &cm_id_priv->work_list);
2126 spin_unlock_irq(&cm_id_priv->lock);
2128 if (ret)
2129 cm_process_work(cm_id_priv, work);
2130 else
2131 cm_deref_id(cm_id_priv);
2132 return 0;
2133 out:
2134 cm_deref_id(cm_id_priv);
2135 return -EINVAL;
2138 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2139 struct cm_id_private *cm_id_priv,
2140 const void *private_data,
2141 u8 private_data_len)
2143 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2144 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
2145 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2146 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2147 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2149 if (private_data && private_data_len)
2150 memcpy(dreq_msg->private_data, private_data, private_data_len);
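/*
 * Usage sketch (illustrative only, not taken from a real consumer): the
 * active side of an established connection typically tears down with
 *
 *	ret = ib_send_cm_dreq(cm_id, NULL, 0);
 *
 * and then waits for an IB_CM_DREP_RECEIVED or IB_CM_DREQ_ERROR event
 * before destroying the cm_id.
 */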
2153 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2154 const void *private_data,
2155 u8 private_data_len)
2157 struct cm_id_private *cm_id_priv;
2158 struct ib_mad_send_buf *msg;
2159 unsigned long flags;
2162 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2163 return -EINVAL;
2165 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2166 spin_lock_irqsave(&cm_id_priv->lock, flags);
2167 if (cm_id->state != IB_CM_ESTABLISHED) {
2168 ret = -EINVAL;
2169 goto out;
2170 }
2172 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2173 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2174 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2176 ret = cm_alloc_msg(cm_id_priv, &msg);
2177 if (ret) {
2178 cm_enter_timewait(cm_id_priv);
2179 goto out;
2180 }
2182 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2183 private_data, private_data_len);
2184 msg->timeout_ms = cm_id_priv->timeout_ms;
2185 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2187 ret = ib_post_send_mad(msg, NULL);
2188 if (ret) {
2189 cm_enter_timewait(cm_id_priv);
2190 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2191 cm_free_msg(msg);
2192 return ret;
2193 }
2195 cm_id->state = IB_CM_DREQ_SENT;
2196 cm_id_priv->msg = msg;
2197 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2200 EXPORT_SYMBOL(ib_send_cm_dreq);
2202 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2203 struct cm_id_private *cm_id_priv,
2204 const void *private_data,
2205 u8 private_data_len)
2207 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2208 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2209 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2211 if (private_data && private_data_len)
2212 memcpy(drep_msg->private_data, private_data, private_data_len);
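/*
 * Usage sketch (illustrative only): the passive side normally replies
 * from its IB_CM_DREQ_RECEIVED event handler with
 *
 *	ret = ib_send_cm_drep(cm_id, NULL, 0);
 *
 * The id enters timewait whether or not the DREP could be sent.
 */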
2215 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2216 const void *private_data,
2217 u8 private_data_len)
2219 struct cm_id_private *cm_id_priv;
2220 struct ib_mad_send_buf *msg;
2221 unsigned long flags;
2225 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2226 return -EINVAL;
2228 data = cm_copy_private_data(private_data, private_data_len);
2229 if (IS_ERR(data))
2230 return PTR_ERR(data);
2232 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2233 spin_lock_irqsave(&cm_id_priv->lock, flags);
2234 if (cm_id->state != IB_CM_DREQ_RCVD) {
2235 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2236 kfree(data);
2237 return -EINVAL;
2238 }
2240 cm_set_private_data(cm_id_priv, data, private_data_len);
2241 cm_enter_timewait(cm_id_priv);
2243 ret = cm_alloc_msg(cm_id_priv, &msg);
2244 if (ret)
2245 goto out;
2247 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2248 private_data, private_data_len);
2250 ret = ib_post_send_mad(msg, NULL);
2251 if (ret) {
2252 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2253 cm_free_msg(msg);
2254 return ret;
2255 }
2257 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2260 EXPORT_SYMBOL(ib_send_cm_drep);
2262 static int cm_issue_drep(struct cm_port *port,
2263 struct ib_mad_recv_wc *mad_recv_wc)
2265 struct ib_mad_send_buf *msg = NULL;
2266 struct cm_dreq_msg *dreq_msg;
2267 struct cm_drep_msg *drep_msg;
2270 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2274 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2275 drep_msg = (struct cm_drep_msg *) msg->mad;
2277 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2278 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2279 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2281 ret = ib_post_send_mad(msg, NULL);
2288 static int cm_dreq_handler(struct cm_work *work)
2290 struct cm_id_private *cm_id_priv;
2291 struct cm_dreq_msg *dreq_msg;
2292 struct ib_mad_send_buf *msg = NULL;
2295 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2296 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2297 dreq_msg->local_comm_id);
2298 if (!cm_id_priv) {
2299 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2300 counter[CM_DREQ_COUNTER]);
2301 cm_issue_drep(work->port, work->mad_recv_wc);
2302 return -EINVAL;
2303 }
2305 work->cm_event.private_data = &dreq_msg->private_data;
2307 spin_lock_irq(&cm_id_priv->lock);
2308 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2309 goto unlock;
2311 switch (cm_id_priv->id.state) {
2312 case IB_CM_REP_SENT:
2313 case IB_CM_DREQ_SENT:
2314 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2316 case IB_CM_ESTABLISHED:
2317 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2318 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2319 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2321 case IB_CM_MRA_REP_RCVD:
2323 case IB_CM_TIMEWAIT:
2324 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2325 counter[CM_DREQ_COUNTER]);
2326 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2327 goto unlock;
2329 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2330 cm_id_priv->private_data,
2331 cm_id_priv->private_data_len);
2332 spin_unlock_irq(&cm_id_priv->lock);
2334 if (ib_post_send_mad(msg, NULL))
2335 cm_free_msg(msg);
2336 goto deref;
2337 case IB_CM_DREQ_RCVD:
2338 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2339 counter[CM_DREQ_COUNTER]);
2340 goto unlock;
2341 default:
2342 goto unlock;
2343 }
2344 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2345 cm_id_priv->tid = dreq_msg->hdr.tid;
2346 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2347 if (!ret)
2348 list_add_tail(&work->list, &cm_id_priv->work_list);
2349 spin_unlock_irq(&cm_id_priv->lock);
2351 if (ret)
2352 cm_process_work(cm_id_priv, work);
2353 else
2354 cm_deref_id(cm_id_priv);
2355 return 0;
2357 unlock: spin_unlock_irq(&cm_id_priv->lock);
2358 deref: cm_deref_id(cm_id_priv);
2362 static int cm_drep_handler(struct cm_work *work)
2364 struct cm_id_private *cm_id_priv;
2365 struct cm_drep_msg *drep_msg;
2368 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2369 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2370 drep_msg->local_comm_id);
2371 if (!cm_id_priv)
2372 return -EINVAL;
2374 work->cm_event.private_data = &drep_msg->private_data;
2376 spin_lock_irq(&cm_id_priv->lock);
2377 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2378 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2379 spin_unlock_irq(&cm_id_priv->lock);
2382 cm_enter_timewait(cm_id_priv);
2384 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2385 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2386 if (!ret)
2387 list_add_tail(&work->list, &cm_id_priv->work_list);
2388 spin_unlock_irq(&cm_id_priv->lock);
2390 if (ret)
2391 cm_process_work(cm_id_priv, work);
2392 else
2393 cm_deref_id(cm_id_priv);
2394 return 0;
2395 out:
2396 cm_deref_id(cm_id_priv);
2397 return -EINVAL;
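/*
 * Usage sketch (illustrative only): a listener that cannot service an
 * incoming REQ might answer from its IB_CM_REQ_RECEIVED handler with
 *
 *	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
 *		       NULL, 0);
 *
 * which also resets the id back to idle.
 */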
2400 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2401 enum ib_cm_rej_reason reason,
2402 void *ari,
2403 u8 ari_length,
2404 const void *private_data,
2405 u8 private_data_len)
2407 struct cm_id_private *cm_id_priv;
2408 struct ib_mad_send_buf *msg;
2409 unsigned long flags;
2412 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2413 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2414 return -EINVAL;
2416 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2418 spin_lock_irqsave(&cm_id_priv->lock, flags);
2419 switch (cm_id->state) {
2420 case IB_CM_REQ_SENT:
2421 case IB_CM_MRA_REQ_RCVD:
2422 case IB_CM_REQ_RCVD:
2423 case IB_CM_MRA_REQ_SENT:
2424 case IB_CM_REP_RCVD:
2425 case IB_CM_MRA_REP_SENT:
2426 ret = cm_alloc_msg(cm_id_priv, &msg);
2427 if (!ret)
2428 cm_format_rej((struct cm_rej_msg *) msg->mad,
2429 cm_id_priv, reason, ari, ari_length,
2430 private_data, private_data_len);
2432 cm_reset_to_idle(cm_id_priv);
2434 case IB_CM_REP_SENT:
2435 case IB_CM_MRA_REP_RCVD:
2436 ret = cm_alloc_msg(cm_id_priv, &msg);
2437 if (!ret)
2438 cm_format_rej((struct cm_rej_msg *) msg->mad,
2439 cm_id_priv, reason, ari, ari_length,
2440 private_data, private_data_len);
2442 cm_enter_timewait(cm_id_priv);
2443 break;
2444 default:
2445 ret = -EINVAL;
2446 goto out;
2447 }
2449 if (ret)
2450 goto out;
2452 ret = ib_post_send_mad(msg, NULL);
2453 if (ret)
2454 cm_free_msg(msg);
2456 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2459 EXPORT_SYMBOL(ib_send_cm_rej);
2461 static void cm_format_rej_event(struct cm_work *work)
2463 struct cm_rej_msg *rej_msg;
2464 struct ib_cm_rej_event_param *param;
2466 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2467 param = &work->cm_event.param.rej_rcvd;
2468 param->ari = rej_msg->ari;
2469 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2470 param->reason = __be16_to_cpu(rej_msg->reason);
2471 work->cm_event.private_data = &rej_msg->private_data;
2474 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2476 struct cm_timewait_info *timewait_info;
2477 struct cm_id_private *cm_id_priv;
2480 remote_id = rej_msg->local_comm_id;
2482 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2483 spin_lock_irq(&cm.lock);
2484 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2485 remote_id);
2486 if (!timewait_info) {
2487 spin_unlock_irq(&cm.lock);
2488 return NULL;
2489 }
2490 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2491 (timewait_info->work.local_id ^
2492 cm.random_id_operand));
2493 if (cm_id_priv) {
2494 if (cm_id_priv->id.remote_id == remote_id)
2495 atomic_inc(&cm_id_priv->refcount);
2496 else
2497 cm_id_priv = NULL;
2498 }
2499 spin_unlock_irq(&cm.lock);
2500 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2501 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2503 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2505 return cm_id_priv;
2508 static int cm_rej_handler(struct cm_work *work)
2510 struct cm_id_private *cm_id_priv;
2511 struct cm_rej_msg *rej_msg;
2514 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2515 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2516 if (!cm_id_priv)
2517 return -EINVAL;
2519 cm_format_rej_event(work);
2521 spin_lock_irq(&cm_id_priv->lock);
2522 switch (cm_id_priv->id.state) {
2523 case IB_CM_REQ_SENT:
2524 case IB_CM_MRA_REQ_RCVD:
2525 case IB_CM_REP_SENT:
2526 case IB_CM_MRA_REP_RCVD:
2527 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2529 case IB_CM_REQ_RCVD:
2530 case IB_CM_MRA_REQ_SENT:
2531 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2532 cm_enter_timewait(cm_id_priv);
2533 else
2534 cm_reset_to_idle(cm_id_priv);
2536 case IB_CM_DREQ_SENT:
2537 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2539 case IB_CM_REP_RCVD:
2540 case IB_CM_MRA_REP_SENT:
2541 cm_enter_timewait(cm_id_priv);
2543 case IB_CM_ESTABLISHED:
2544 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2545 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2546 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2547 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2548 cm_id_priv->msg);
2549 cm_enter_timewait(cm_id_priv);
2550 break;
2551 }
2553 default:
2554 spin_unlock_irq(&cm_id_priv->lock);
2556 goto out;
2557 }
2559 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2560 if (!ret)
2561 list_add_tail(&work->list, &cm_id_priv->work_list);
2562 spin_unlock_irq(&cm_id_priv->lock);
2564 if (ret)
2565 cm_process_work(cm_id_priv, work);
2566 else
2567 cm_deref_id(cm_id_priv);
2568 return 0;
2569 out:
2570 cm_deref_id(cm_id_priv);
2571 return -EINVAL;
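/*
 * Usage sketch (illustrative only): a receiver that needs more time to
 * service a REQ can stretch the sender's timeout with
 *
 *	ret = ib_send_cm_mra(cm_id, service_timeout, NULL, 0);
 *
 * Setting IB_CM_MRA_FLAG_DELAY in service_timeout only arms an MRA to
 * be sent if a duplicate of the message arrives later, rather than
 * sending one immediately.
 */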
2574 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2575 u8 service_timeout,
2576 const void *private_data,
2577 u8 private_data_len)
2579 struct cm_id_private *cm_id_priv;
2580 struct ib_mad_send_buf *msg;
2581 enum ib_cm_state cm_state;
2582 enum ib_cm_lap_state lap_state;
2583 enum cm_msg_response msg_response;
2585 unsigned long flags;
2588 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2589 return -EINVAL;
2591 data = cm_copy_private_data(private_data, private_data_len);
2592 if (IS_ERR(data))
2593 return PTR_ERR(data);
2595 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2597 spin_lock_irqsave(&cm_id_priv->lock, flags);
2598 switch(cm_id_priv->id.state) {
2599 case IB_CM_REQ_RCVD:
2600 cm_state = IB_CM_MRA_REQ_SENT;
2601 lap_state = cm_id->lap_state;
2602 msg_response = CM_MSG_RESPONSE_REQ;
2604 case IB_CM_REP_RCVD:
2605 cm_state = IB_CM_MRA_REP_SENT;
2606 lap_state = cm_id->lap_state;
2607 msg_response = CM_MSG_RESPONSE_REP;
2609 case IB_CM_ESTABLISHED:
2610 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2611 cm_state = cm_id->state;
2612 lap_state = IB_CM_MRA_LAP_SENT;
2613 msg_response = CM_MSG_RESPONSE_OTHER;
2621 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2622 ret = cm_alloc_msg(cm_id_priv, &msg);
2626 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2627 msg_response, service_timeout,
2628 private_data, private_data_len);
2629 ret = ib_post_send_mad(msg, NULL);
2634 cm_id->state = cm_state;
2635 cm_id->lap_state = lap_state;
2636 cm_id_priv->service_timeout = service_timeout;
2637 cm_set_private_data(cm_id_priv, data, private_data_len);
2638 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2641 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2645 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2650 EXPORT_SYMBOL(ib_send_cm_mra);
2652 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2654 switch (cm_mra_get_msg_mraed(mra_msg)) {
2655 case CM_MSG_RESPONSE_REQ:
2656 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2657 case CM_MSG_RESPONSE_REP:
2658 case CM_MSG_RESPONSE_OTHER:
2659 return cm_acquire_id(mra_msg->remote_comm_id,
2660 mra_msg->local_comm_id);
2666 static int cm_mra_handler(struct cm_work *work)
2668 struct cm_id_private *cm_id_priv;
2669 struct cm_mra_msg *mra_msg;
2672 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2673 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2674 if (!cm_id_priv)
2675 return -EINVAL;
2677 work->cm_event.private_data = &mra_msg->private_data;
2678 work->cm_event.param.mra_rcvd.service_timeout =
2679 cm_mra_get_service_timeout(mra_msg);
2680 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2681 cm_convert_to_ms(cm_id_priv->av.timeout);
2683 spin_lock_irq(&cm_id_priv->lock);
2684 switch (cm_id_priv->id.state) {
2685 case IB_CM_REQ_SENT:
2686 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2687 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2688 cm_id_priv->msg, timeout))
2689 goto out;
2690 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2692 case IB_CM_REP_SENT:
2693 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2694 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2695 cm_id_priv->msg, timeout))
2696 goto out;
2697 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2699 case IB_CM_ESTABLISHED:
2700 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2701 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2702 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2703 cm_id_priv->msg, timeout)) {
2704 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2705 atomic_long_inc(&work->port->
2706 counter_group[CM_RECV_DUPLICATES].
2707 counter[CM_MRA_COUNTER]);
2708 goto out;
2709 }
2710 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2712 case IB_CM_MRA_REQ_RCVD:
2713 case IB_CM_MRA_REP_RCVD:
2714 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2715 counter[CM_MRA_COUNTER]);
2716 /* fall through */
2717 default:
2718 goto out;
2719 }
2721 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2722 cm_id_priv->id.state;
2723 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2724 if (!ret)
2725 list_add_tail(&work->list, &cm_id_priv->work_list);
2726 spin_unlock_irq(&cm_id_priv->lock);
2728 if (ret)
2729 cm_process_work(cm_id_priv, work);
2730 else
2731 cm_deref_id(cm_id_priv);
2732 return 0;
2733 out:
2734 spin_unlock_irq(&cm_id_priv->lock);
2735 cm_deref_id(cm_id_priv);
2736 return -EINVAL;
2739 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2740 struct cm_id_private *cm_id_priv,
2741 struct ib_sa_path_rec *alternate_path,
2742 const void *private_data,
2743 u8 private_data_len)
2745 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2746 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2747 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2748 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2749 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2750 /* todo: need remote CM response timeout */
2751 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2752 lap_msg->alt_local_lid = alternate_path->slid;
2753 lap_msg->alt_remote_lid = alternate_path->dlid;
2754 lap_msg->alt_local_gid = alternate_path->sgid;
2755 lap_msg->alt_remote_gid = alternate_path->dgid;
2756 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2757 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2758 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2759 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2760 cm_lap_set_sl(lap_msg, alternate_path->sl);
2761 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2762 cm_lap_set_local_ack_timeout(lap_msg,
2763 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2764 alternate_path->packet_life_time));
2766 if (private_data && private_data_len)
2767 memcpy(lap_msg->private_data, private_data, private_data_len);
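/*
 * Usage sketch (illustrative only): once an alternate ib_sa_path_rec
 * has been resolved (for example through an SA path query), the active
 * side loads it with
 *
 *	ret = ib_send_cm_lap(cm_id, &alt_path, NULL, 0);
 *
 * This is only legal on an established connection whose lap_state is
 * still uninitialized or idle.
 */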
2770 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2771 struct ib_sa_path_rec *alternate_path,
2772 const void *private_data,
2773 u8 private_data_len)
2775 struct cm_id_private *cm_id_priv;
2776 struct ib_mad_send_buf *msg;
2777 unsigned long flags;
2780 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2781 return -EINVAL;
2783 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2784 spin_lock_irqsave(&cm_id_priv->lock, flags);
2785 if (cm_id->state != IB_CM_ESTABLISHED ||
2786 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2787 cm_id->lap_state != IB_CM_LAP_IDLE)) {
2788 ret = -EINVAL;
2789 goto out;
2790 }
2792 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
2793 cm_id_priv);
2794 if (ret)
2795 goto out;
2796 cm_id_priv->alt_av.timeout =
2797 cm_ack_timeout(cm_id_priv->target_ack_delay,
2798 cm_id_priv->alt_av.timeout - 1);
2800 ret = cm_alloc_msg(cm_id_priv, &msg);
2801 if (ret)
2802 goto out;
2804 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2805 alternate_path, private_data, private_data_len);
2806 msg->timeout_ms = cm_id_priv->timeout_ms;
2807 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2809 ret = ib_post_send_mad(msg, NULL);
2810 if (ret) {
2811 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2812 cm_free_msg(msg);
2813 return ret;
2814 }
2816 cm_id->lap_state = IB_CM_LAP_SENT;
2817 cm_id_priv->msg = msg;
2819 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2822 EXPORT_SYMBOL(ib_send_cm_lap);
2824 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
2825 struct ib_sa_path_rec *path,
2826 struct cm_lap_msg *lap_msg)
2828 memset(path, 0, sizeof *path);
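/*
 * The LAP fields were written from the remote node's point of view, so
 * local/remote (and hence source/destination GID and LID) are
 * deliberately swapped while building our view of the alternate path.
 */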
2829 path->dgid = lap_msg->alt_local_gid;
2830 path->sgid = lap_msg->alt_remote_gid;
2831 path->dlid = lap_msg->alt_local_lid;
2832 path->slid = lap_msg->alt_remote_lid;
2833 path->flow_label = cm_lap_get_flow_label(lap_msg);
2834 path->hop_limit = lap_msg->alt_hop_limit;
2835 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2836 path->reversible = 1;
2837 path->pkey = cm_id_priv->pkey;
2838 path->sl = cm_lap_get_sl(lap_msg);
2839 path->mtu_selector = IB_SA_EQ;
2840 path->mtu = cm_id_priv->path_mtu;
2841 path->rate_selector = IB_SA_EQ;
2842 path->rate = cm_lap_get_packet_rate(lap_msg);
2843 path->packet_life_time_selector = IB_SA_EQ;
2844 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2845 path->packet_life_time -= (path->packet_life_time > 0);
2848 static int cm_lap_handler(struct cm_work *work)
2850 struct cm_id_private *cm_id_priv;
2851 struct cm_lap_msg *lap_msg;
2852 struct ib_cm_lap_event_param *param;
2853 struct ib_mad_send_buf *msg = NULL;
2856 /* todo: verify LAP request and send reject APR if invalid. */
2857 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2858 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2859 lap_msg->local_comm_id);
2860 if (!cm_id_priv)
2861 return -EINVAL;
2863 param = &work->cm_event.param.lap_rcvd;
2864 param->alternate_path = &work->path[0];
2865 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2866 work->cm_event.private_data = &lap_msg->private_data;
2868 spin_lock_irq(&cm_id_priv->lock);
2869 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2870 goto unlock;
2872 switch (cm_id_priv->id.lap_state) {
2873 case IB_CM_LAP_UNINIT:
2874 case IB_CM_LAP_IDLE:
2876 case IB_CM_MRA_LAP_SENT:
2877 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2878 counter[CM_LAP_COUNTER]);
2879 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2880 goto unlock;
2882 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2883 CM_MSG_RESPONSE_OTHER,
2884 cm_id_priv->service_timeout,
2885 cm_id_priv->private_data,
2886 cm_id_priv->private_data_len);
2887 spin_unlock_irq(&cm_id_priv->lock);
2889 if (ib_post_send_mad(msg, NULL))
2890 cm_free_msg(msg);
2891 goto deref;
2892 case IB_CM_LAP_RCVD:
2893 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2894 counter[CM_LAP_COUNTER]);
2895 goto unlock;
2896 default:
2897 goto unlock;
2898 }
2900 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2901 cm_id_priv->tid = lap_msg->hdr.tid;
2902 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2903 work->mad_recv_wc->recv_buf.grh,
2904 &cm_id_priv->av);
2905 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
2906 cm_id_priv);
2907 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2908 if (!ret)
2909 list_add_tail(&work->list, &cm_id_priv->work_list);
2910 spin_unlock_irq(&cm_id_priv->lock);
2912 if (ret)
2913 cm_process_work(cm_id_priv, work);
2914 else
2915 cm_deref_id(cm_id_priv);
2916 return 0;
2918 unlock: spin_unlock_irq(&cm_id_priv->lock);
2919 deref: cm_deref_id(cm_id_priv);
2923 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2924 struct cm_id_private *cm_id_priv,
2925 enum ib_cm_apr_status status,
2926 void *info,
2927 u8 info_length,
2928 const void *private_data,
2929 u8 private_data_len)
2931 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2932 apr_msg->local_comm_id = cm_id_priv->id.local_id;
2933 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2934 apr_msg->ap_status = (u8) status;
2936 if (info && info_length) {
2937 apr_msg->info_length = info_length;
2938 memcpy(apr_msg->info, info, info_length);
2941 if (private_data && private_data_len)
2942 memcpy(apr_msg->private_data, private_data, private_data_len);
2945 int ib_send_cm_apr(struct ib_cm_id *cm_id,
2946 enum ib_cm_apr_status status,
2947 void *info,
2948 u8 info_length,
2949 const void *private_data,
2950 u8 private_data_len)
2952 struct cm_id_private *cm_id_priv;
2953 struct ib_mad_send_buf *msg;
2954 unsigned long flags;
2957 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2958 (info && info_length > IB_CM_APR_INFO_LENGTH))
2959 return -EINVAL;
2961 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2962 spin_lock_irqsave(&cm_id_priv->lock, flags);
2963 if (cm_id->state != IB_CM_ESTABLISHED ||
2964 (cm_id->lap_state != IB_CM_LAP_RCVD &&
2965 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2966 ret = -EINVAL;
2967 goto out;
2968 }
2970 ret = cm_alloc_msg(cm_id_priv, &msg);
2971 if (ret)
2972 goto out;
2974 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2975 info, info_length, private_data, private_data_len);
2976 ret = ib_post_send_mad(msg, NULL);
2977 if (ret) {
2978 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2979 cm_free_msg(msg);
2980 return ret;
2981 }
2983 cm_id->lap_state = IB_CM_LAP_IDLE;
2984 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2987 EXPORT_SYMBOL(ib_send_cm_apr);
2989 static int cm_apr_handler(struct cm_work *work)
2991 struct cm_id_private *cm_id_priv;
2992 struct cm_apr_msg *apr_msg;
2995 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2996 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2997 apr_msg->local_comm_id);
2998 if (!cm_id_priv)
2999 return -EINVAL; /* Unmatched reply. */
3001 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3002 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3003 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3004 work->cm_event.private_data = &apr_msg->private_data;
3006 spin_lock_irq(&cm_id_priv->lock);
3007 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3008 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3009 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3010 spin_unlock_irq(&cm_id_priv->lock);
3011 goto out;
3012 }
3013 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3014 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3015 cm_id_priv->msg = NULL;
3017 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3018 if (!ret)
3019 list_add_tail(&work->list, &cm_id_priv->work_list);
3020 spin_unlock_irq(&cm_id_priv->lock);
3022 if (ret)
3023 cm_process_work(cm_id_priv, work);
3024 else
3025 cm_deref_id(cm_id_priv);
3026 return 0;
3027 out:
3028 cm_deref_id(cm_id_priv);
3029 return -EINVAL;
3032 static int cm_timewait_handler(struct cm_work *work)
3034 struct cm_timewait_info *timewait_info;
3035 struct cm_id_private *cm_id_priv;
3038 timewait_info = (struct cm_timewait_info *)work;
3039 spin_lock_irq(&cm.lock);
3040 list_del(&timewait_info->list);
3041 spin_unlock_irq(&cm.lock);
3043 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3044 timewait_info->work.remote_id);
3045 if (!cm_id_priv)
3046 return -EINVAL;
3048 spin_lock_irq(&cm_id_priv->lock);
3049 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3050 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3051 spin_unlock_irq(&cm_id_priv->lock);
3052 goto out;
3053 }
3054 cm_id_priv->id.state = IB_CM_IDLE;
3055 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3056 if (!ret)
3057 list_add_tail(&work->list, &cm_id_priv->work_list);
3058 spin_unlock_irq(&cm_id_priv->lock);
3060 if (ret)
3061 cm_process_work(cm_id_priv, work);
3062 else
3063 cm_deref_id(cm_id_priv);
3064 return 0;
3065 out:
3066 cm_deref_id(cm_id_priv);
3067 return -EINVAL;
3070 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3071 struct cm_id_private *cm_id_priv,
3072 struct ib_cm_sidr_req_param *param)
3074 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3075 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
3076 sidr_req_msg->request_id = cm_id_priv->id.local_id;
3077 sidr_req_msg->pkey = param->path->pkey;
3078 sidr_req_msg->service_id = param->service_id;
3080 if (param->private_data && param->private_data_len)
3081 memcpy(sidr_req_msg->private_data, param->private_data,
3082 param->private_data_len);
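/*
 * Usage sketch (illustrative only; the values are examples): a client
 * resolving a service ID to a remote QP might fill in
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &path_rec,
 *		.service_id	= service_id,
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 *
 * and then wait for an IB_CM_SIDR_REP_RECEIVED event.
 */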
3085 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3086 struct ib_cm_sidr_req_param *param)
3088 struct cm_id_private *cm_id_priv;
3089 struct ib_mad_send_buf *msg;
3090 unsigned long flags;
3093 if (!param->path || (param->private_data &&
3094 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3095 return -EINVAL;
3097 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3098 ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
3099 if (ret)
3100 goto out;
3102 cm_id->service_id = param->service_id;
3103 cm_id->service_mask = ~cpu_to_be64(0);
3104 cm_id_priv->timeout_ms = param->timeout_ms;
3105 cm_id_priv->max_cm_retries = param->max_cm_retries;
3106 ret = cm_alloc_msg(cm_id_priv, &msg);
3107 if (ret)
3108 goto out;
3110 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3111 param);
3112 msg->timeout_ms = cm_id_priv->timeout_ms;
3113 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3115 spin_lock_irqsave(&cm_id_priv->lock, flags);
3116 if (cm_id->state == IB_CM_IDLE)
3117 ret = ib_post_send_mad(msg, NULL);
3118 else
3119 ret = -EINVAL;
3121 if (ret) {
3122 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3123 cm_free_msg(msg);
3124 goto out;
3125 }
3126 cm_id->state = IB_CM_SIDR_REQ_SENT;
3127 cm_id_priv->msg = msg;
3128 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3129 out:
3130 return ret;
3132 EXPORT_SYMBOL(ib_send_cm_sidr_req);
3134 static void cm_format_sidr_req_event(struct cm_work *work,
3135 struct ib_cm_id *listen_id)
3137 struct cm_sidr_req_msg *sidr_req_msg;
3138 struct ib_cm_sidr_req_event_param *param;
3140 sidr_req_msg = (struct cm_sidr_req_msg *)
3141 work->mad_recv_wc->recv_buf.mad;
3142 param = &work->cm_event.param.sidr_req_rcvd;
3143 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3144 param->listen_id = listen_id;
3145 param->service_id = sidr_req_msg->service_id;
3146 param->bth_pkey = cm_get_bth_pkey(work);
3147 param->port = work->port->port_num;
3148 work->cm_event.private_data = &sidr_req_msg->private_data;
3151 static int cm_sidr_req_handler(struct cm_work *work)
3153 struct ib_cm_id *cm_id;
3154 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3155 struct cm_sidr_req_msg *sidr_req_msg;
3158 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3159 if (IS_ERR(cm_id))
3160 return PTR_ERR(cm_id);
3161 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3163 /* Record SGID/SLID and request ID for lookup. */
3164 sidr_req_msg = (struct cm_sidr_req_msg *)
3165 work->mad_recv_wc->recv_buf.mad;
3166 wc = work->mad_recv_wc->wc;
3167 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3168 cm_id_priv->av.dgid.global.interface_id = 0;
3169 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3170 work->mad_recv_wc->recv_buf.grh,
3171 &cm_id_priv->av);
3172 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3173 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3174 atomic_inc(&cm_id_priv->work_count);
3176 spin_lock_irq(&cm.lock);
3177 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3178 if (cur_cm_id_priv) {
3179 spin_unlock_irq(&cm.lock);
3180 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3181 counter[CM_SIDR_REQ_COUNTER]);
3182 goto out; /* Duplicate message. */
3184 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3185 cur_cm_id_priv = cm_find_listen(cm_id->device,
3186 sidr_req_msg->service_id);
3187 if (!cur_cm_id_priv) {
3188 spin_unlock_irq(&cm.lock);
3189 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3190 goto out; /* No match. */
3192 atomic_inc(&cur_cm_id_priv->refcount);
3193 atomic_inc(&cm_id_priv->refcount);
3194 spin_unlock_irq(&cm.lock);
3196 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3197 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3198 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3199 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3201 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3202 cm_process_work(cm_id_priv, work);
3203 cm_deref_id(cur_cm_id_priv);
3204 return 0;
3205 out:
3206 ib_destroy_cm_id(&cm_id_priv->id);
3207 return -EINVAL;
3210 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3211 struct cm_id_private *cm_id_priv,
3212 struct ib_cm_sidr_rep_param *param)
3214 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3215 cm_id_priv->tid);
3216 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3217 sidr_rep_msg->status = param->status;
3218 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3219 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3220 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3222 if (param->info && param->info_length)
3223 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3225 if (param->private_data && param->private_data_len)
3226 memcpy(sidr_rep_msg->private_data, param->private_data,
3227 param->private_data_len);
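/*
 * Usage sketch (illustrative only; the values are examples): from an
 * IB_CM_SIDR_REQ_RECEIVED handler a server might answer with
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.status	= IB_SIDR_SUCCESS,
 *		.qp_num	= qp->qp_num,
 *		.qkey	= my_qkey,
 *	};
 *	ret = ib_send_cm_sidr_rep(cm_id, &rep);
 */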
3230 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3231 struct ib_cm_sidr_rep_param *param)
3233 struct cm_id_private *cm_id_priv;
3234 struct ib_mad_send_buf *msg;
3235 unsigned long flags;
3238 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3239 (param->private_data &&
3240 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3241 return -EINVAL;
3243 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3244 spin_lock_irqsave(&cm_id_priv->lock, flags);
3245 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3246 ret = -EINVAL;
3247 goto error;
3248 }
3250 ret = cm_alloc_msg(cm_id_priv, &msg);
3251 if (ret)
3252 goto error;
3254 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3255 param);
3256 ret = ib_post_send_mad(msg, NULL);
3257 if (ret) {
3258 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3259 cm_free_msg(msg);
3260 return ret;
3261 }
3262 cm_id->state = IB_CM_IDLE;
3263 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3265 spin_lock_irqsave(&cm.lock, flags);
3266 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3267 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3268 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3270 spin_unlock_irqrestore(&cm.lock, flags);
3273 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3276 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3278 static void cm_format_sidr_rep_event(struct cm_work *work)
3280 struct cm_sidr_rep_msg *sidr_rep_msg;
3281 struct ib_cm_sidr_rep_event_param *param;
3283 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3284 work->mad_recv_wc->recv_buf.mad;
3285 param = &work->cm_event.param.sidr_rep_rcvd;
3286 param->status = sidr_rep_msg->status;
3287 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3288 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3289 param->info = &sidr_rep_msg->info;
3290 param->info_len = sidr_rep_msg->info_length;
3291 work->cm_event.private_data = &sidr_rep_msg->private_data;
3294 static int cm_sidr_rep_handler(struct cm_work *work)
3296 struct cm_sidr_rep_msg *sidr_rep_msg;
3297 struct cm_id_private *cm_id_priv;
3299 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3300 work->mad_recv_wc->recv_buf.mad;
3301 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3302 if (!cm_id_priv)
3303 return -EINVAL; /* Unmatched reply. */
3305 spin_lock_irq(&cm_id_priv->lock);
3306 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3307 spin_unlock_irq(&cm_id_priv->lock);
3308 goto out;
3309 }
3310 cm_id_priv->id.state = IB_CM_IDLE;
3311 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3312 spin_unlock_irq(&cm_id_priv->lock);
3314 cm_format_sidr_rep_event(work);
3315 cm_process_work(cm_id_priv, work);
3316 return 0;
3317 out:
3318 cm_deref_id(cm_id_priv);
3319 return -EINVAL;
3322 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3323 enum ib_wc_status wc_status)
3325 struct cm_id_private *cm_id_priv;
3326 struct ib_cm_event cm_event;
3327 enum ib_cm_state state;
3330 memset(&cm_event, 0, sizeof cm_event);
3331 cm_id_priv = msg->context[0];
3333 /* Discard old sends or ones without a response. */
3334 spin_lock_irq(&cm_id_priv->lock);
3335 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3336 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3337 goto discard;
3339 switch (state) {
3340 case IB_CM_REQ_SENT:
3341 case IB_CM_MRA_REQ_RCVD:
3342 cm_reset_to_idle(cm_id_priv);
3343 cm_event.event = IB_CM_REQ_ERROR;
3345 case IB_CM_REP_SENT:
3346 case IB_CM_MRA_REP_RCVD:
3347 cm_reset_to_idle(cm_id_priv);
3348 cm_event.event = IB_CM_REP_ERROR;
3350 case IB_CM_DREQ_SENT:
3351 cm_enter_timewait(cm_id_priv);
3352 cm_event.event = IB_CM_DREQ_ERROR;
3354 case IB_CM_SIDR_REQ_SENT:
3355 cm_id_priv->id.state = IB_CM_IDLE;
3356 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3357 break;
3358 default:
3359 goto discard;
3360 }
3361 spin_unlock_irq(&cm_id_priv->lock);
3362 cm_event.param.send_status = wc_status;
3364 /* No other events can occur on the cm_id at this point. */
3365 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3367 if (ret)
3368 ib_destroy_cm_id(&cm_id_priv->id);
3369 return;
3370 discard:
3371 spin_unlock_irq(&cm_id_priv->lock);
3372 cm_free_msg(msg);
3375 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3376 struct ib_mad_send_wc *mad_send_wc)
3378 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3379 struct cm_port *port;
3382 port = mad_agent->context;
3383 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3384 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3387 * If the send was in response to a received message (context[0] is not
3388 * set to a cm_id), and is not a REJ, then it is a send that was
3389 * manually retried.
3390 */
3391 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3392 msg->retries = 1;
3394 atomic_long_add(1 + msg->retries,
3395 &port->counter_group[CM_XMIT].counter[attr_index]);
3396 if (msg->retries)
3397 atomic_long_add(msg->retries,
3398 &port->counter_group[CM_XMIT_RETRIES].
3399 counter[attr_index]);
3401 switch (mad_send_wc->status) {
3402 case IB_WC_SUCCESS:
3403 case IB_WC_WR_FLUSH_ERR:
3404 cm_free_msg(msg);
3405 break;
3406 default:
3407 if (msg->context[0] && msg->context[1])
3408 cm_process_send_error(msg, mad_send_wc->status);
3409 else
3410 cm_free_msg(msg);
3415 static void cm_work_handler(struct work_struct *_work)
3417 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3420 switch (work->cm_event.event) {
3421 case IB_CM_REQ_RECEIVED:
3422 ret = cm_req_handler(work);
3424 case IB_CM_MRA_RECEIVED:
3425 ret = cm_mra_handler(work);
3427 case IB_CM_REJ_RECEIVED:
3428 ret = cm_rej_handler(work);
3430 case IB_CM_REP_RECEIVED:
3431 ret = cm_rep_handler(work);
3433 case IB_CM_RTU_RECEIVED:
3434 ret = cm_rtu_handler(work);
3436 case IB_CM_USER_ESTABLISHED:
3437 ret = cm_establish_handler(work);
3439 case IB_CM_DREQ_RECEIVED:
3440 ret = cm_dreq_handler(work);
3442 case IB_CM_DREP_RECEIVED:
3443 ret = cm_drep_handler(work);
3445 case IB_CM_SIDR_REQ_RECEIVED:
3446 ret = cm_sidr_req_handler(work);
3448 case IB_CM_SIDR_REP_RECEIVED:
3449 ret = cm_sidr_rep_handler(work);
3451 case IB_CM_LAP_RECEIVED:
3452 ret = cm_lap_handler(work);
3454 case IB_CM_APR_RECEIVED:
3455 ret = cm_apr_handler(work);
3457 case IB_CM_TIMEWAIT_EXIT:
3458 ret = cm_timewait_handler(work);
3459 break;
3460 default:
3461 ret = -EINVAL;
3463 }
3464 if (ret)
3465 cm_free_work(work);
3468 static int cm_establish(struct ib_cm_id *cm_id)
3470 struct cm_id_private *cm_id_priv;
3471 struct cm_work *work;
3472 unsigned long flags;
3474 struct cm_device *cm_dev;
3476 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3477 if (!cm_dev)
3478 return -ENODEV;
3480 work = kmalloc(sizeof *work, GFP_ATOMIC);
3481 if (!work)
3482 return -ENOMEM;
3484 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3485 spin_lock_irqsave(&cm_id_priv->lock, flags);
3486 switch (cm_id->state)
3488 case IB_CM_REP_SENT:
3489 case IB_CM_MRA_REP_RCVD:
3490 cm_id->state = IB_CM_ESTABLISHED;
3492 case IB_CM_ESTABLISHED:
3499 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3507 * The CM worker thread may try to destroy the cm_id before it
3508 * can execute this work item. To prevent potential deadlock,
3509 * we need to find the cm_id once we're in the context of the
3510 * worker thread, rather than holding a reference on it.
3512 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3513 work->local_id = cm_id->local_id;
3514 work->remote_id = cm_id->remote_id;
3515 work->mad_recv_wc = NULL;
3516 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3518 /* Check if the device started its remove_one */
3519 spin_lock_irqsave(&cm.lock, flags);
3520 if (!cm_dev->going_down) {
3521 queue_delayed_work(cm.wq, &work->work, 0);
3526 spin_unlock_irqrestore(&cm.lock, flags);
3532 static int cm_migrate(struct ib_cm_id *cm_id)
3534 struct cm_id_private *cm_id_priv;
3535 struct cm_av tmp_av;
3536 unsigned long flags;
3537 int tmp_send_port_not_ready;
3540 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3541 spin_lock_irqsave(&cm_id_priv->lock, flags);
3542 if (cm_id->state == IB_CM_ESTABLISHED &&
3543 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3544 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3545 cm_id->lap_state = IB_CM_LAP_IDLE;
3546 /* Swap address vector */
3547 tmp_av = cm_id_priv->av;
3548 cm_id_priv->av = cm_id_priv->alt_av;
3549 cm_id_priv->alt_av = tmp_av;
3550 /* Swap port send ready state */
3551 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3552 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3553 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3556 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
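/*
 * Usage sketch (illustrative only): a consumer's QP event handler can
 * simply forward the relevant asynchronous events to the CM:
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		struct ib_cm_id *cm_id = ctx;
 *
 *		if (event->event == IB_EVENT_COMM_EST ||
 *		    event->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(cm_id, event->event);
 *	}
 */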
3561 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3565 switch (event) {
3566 case IB_EVENT_COMM_EST:
3567 ret = cm_establish(cm_id);
3569 case IB_EVENT_PATH_MIG:
3570 ret = cm_migrate(cm_id);
3571 break;
3572 default:
3573 ret = -EINVAL;
3574 }
3575 return ret;
3577 EXPORT_SYMBOL(ib_cm_notify);
3579 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3580 struct ib_mad_send_buf *send_buf,
3581 struct ib_mad_recv_wc *mad_recv_wc)
3583 struct cm_port *port = mad_agent->context;
3584 struct cm_work *work;
3585 enum ib_cm_event_type event;
3590 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3591 case CM_REQ_ATTR_ID:
3592 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3593 alt_local_lid != 0);
3594 event = IB_CM_REQ_RECEIVED;
3596 case CM_MRA_ATTR_ID:
3597 event = IB_CM_MRA_RECEIVED;
3599 case CM_REJ_ATTR_ID:
3600 event = IB_CM_REJ_RECEIVED;
3602 case CM_REP_ATTR_ID:
3603 event = IB_CM_REP_RECEIVED;
3605 case CM_RTU_ATTR_ID:
3606 event = IB_CM_RTU_RECEIVED;
3608 case CM_DREQ_ATTR_ID:
3609 event = IB_CM_DREQ_RECEIVED;
3611 case CM_DREP_ATTR_ID:
3612 event = IB_CM_DREP_RECEIVED;
3614 case CM_SIDR_REQ_ATTR_ID:
3615 event = IB_CM_SIDR_REQ_RECEIVED;
3617 case CM_SIDR_REP_ATTR_ID:
3618 event = IB_CM_SIDR_REP_RECEIVED;
3620 case CM_LAP_ATTR_ID:
3621 paths = 1;
3622 event = IB_CM_LAP_RECEIVED;
3624 case CM_APR_ATTR_ID:
3625 event = IB_CM_APR_RECEIVED;
3626 break;
3627 default:
3628 ib_free_recv_mad(mad_recv_wc);
3629 return;
3630 }
3632 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3633 atomic_long_inc(&port->counter_group[CM_RECV].
3634 counter[attr_id - CM_ATTR_ID_OFFSET]);
3636 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3637 GFP_KERNEL);
3638 if (!work) {
3639 ib_free_recv_mad(mad_recv_wc);
3640 return;
3641 }
3643 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3644 work->cm_event.event = event;
3645 work->mad_recv_wc = mad_recv_wc;
3646 work->port = port;
3648 /* Check if the device started its remove_one */
3649 spin_lock_irq(&cm.lock);
3650 if (!port->cm_dev->going_down)
3651 queue_delayed_work(cm.wq, &work->work, 0);
3654 spin_unlock_irq(&cm.lock);
3658 ib_free_recv_mad(mad_recv_wc);
3662 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3663 struct ib_qp_attr *qp_attr,
3666 unsigned long flags;
3669 spin_lock_irqsave(&cm_id_priv->lock, flags);
3670 switch (cm_id_priv->id.state) {
3671 case IB_CM_REQ_SENT:
3672 case IB_CM_MRA_REQ_RCVD:
3673 case IB_CM_REQ_RCVD:
3674 case IB_CM_MRA_REQ_SENT:
3675 case IB_CM_REP_RCVD:
3676 case IB_CM_MRA_REP_SENT:
3677 case IB_CM_REP_SENT:
3678 case IB_CM_MRA_REP_RCVD:
3679 case IB_CM_ESTABLISHED:
3680 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3681 IB_QP_PKEY_INDEX | IB_QP_PORT;
3682 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3683 if (cm_id_priv->responder_resources)
3684 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3685 IB_ACCESS_REMOTE_ATOMIC;
3686 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3687 qp_attr->port_num = cm_id_priv->av.port->port_num;
3694 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3698 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3699 struct ib_qp_attr *qp_attr,
3702 unsigned long flags;
3705 spin_lock_irqsave(&cm_id_priv->lock, flags);
3706 switch (cm_id_priv->id.state) {
3707 case IB_CM_REQ_RCVD:
3708 case IB_CM_MRA_REQ_SENT:
3709 case IB_CM_REP_RCVD:
3710 case IB_CM_MRA_REP_SENT:
3711 case IB_CM_REP_SENT:
3712 case IB_CM_MRA_REP_RCVD:
3713 case IB_CM_ESTABLISHED:
3714 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3715 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3716 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3717 qp_attr->path_mtu = cm_id_priv->path_mtu;
3718 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3719 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3720 if (cm_id_priv->qp_type == IB_QPT_RC ||
3721 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3722 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3723 IB_QP_MIN_RNR_TIMER;
3724 qp_attr->max_dest_rd_atomic =
3725 cm_id_priv->responder_resources;
3726 qp_attr->min_rnr_timer = 0;
3728 if (cm_id_priv->alt_av.ah_attr.dlid) {
3729 *qp_attr_mask |= IB_QP_ALT_PATH;
3730 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3731 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3732 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3733 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3741 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3745 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3746 struct ib_qp_attr *qp_attr,
3749 unsigned long flags;
3752 spin_lock_irqsave(&cm_id_priv->lock, flags);
3753 switch (cm_id_priv->id.state) {
3754 /* Allow transition to RTS before sending REP */
3755 case IB_CM_REQ_RCVD:
3756 case IB_CM_MRA_REQ_SENT:
3758 case IB_CM_REP_RCVD:
3759 case IB_CM_MRA_REP_SENT:
3760 case IB_CM_REP_SENT:
3761 case IB_CM_MRA_REP_RCVD:
3762 case IB_CM_ESTABLISHED:
3763 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3764 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3765 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3766 switch (cm_id_priv->qp_type) {
3767 case IB_QPT_RC:
3768 case IB_QPT_XRC_INI:
3769 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3770 IB_QP_MAX_QP_RD_ATOMIC;
3771 qp_attr->retry_cnt = cm_id_priv->retry_count;
3772 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3773 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3775 case IB_QPT_XRC_TGT:
3776 *qp_attr_mask |= IB_QP_TIMEOUT;
3777 qp_attr->timeout = cm_id_priv->av.timeout;
3782 if (cm_id_priv->alt_av.ah_attr.dlid) {
3783 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3784 qp_attr->path_mig_state = IB_MIG_REARM;
3785 }
3786 } else {
3787 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3788 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3789 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3790 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3791 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3792 qp_attr->path_mig_state = IB_MIG_REARM;
3800 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
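/*
 * Usage sketch (illustrative only): a consumer steps its QP through
 * INIT, RTR and RTS using CM-derived attributes:
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask, ret;
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 * and repeats the same sequence for IB_QPS_RTR and IB_QPS_RTS.
 */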
3804 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3805 struct ib_qp_attr *qp_attr,
3808 struct cm_id_private *cm_id_priv;
3811 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3812 switch (qp_attr->qp_state) {
3813 case IB_QPS_INIT:
3814 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3815 break;
3816 case IB_QPS_RTR:
3817 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3818 break;
3819 case IB_QPS_RTS:
3820 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3821 break;
3822 default:
3823 ret = -EINVAL;
3824 break;
3825 }
3826 return ret;
3828 EXPORT_SYMBOL(ib_cm_init_qp_attr);
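/*
 * The counters below are exported through sysfs, one kobject per
 * counter group under each port of a device, e.g. (device and port
 * names are examples):
 *
 *	/sys/class/infiniband_cm/mlx4_0/1/cm_rx_duplicates/req
 */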
3830 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
3831 char *buf)
3833 struct cm_counter_group *group;
3834 struct cm_counter_attribute *cm_attr;
3836 group = container_of(obj, struct cm_counter_group, obj);
3837 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
3839 return sprintf(buf, "%ld\n",
3840 atomic_long_read(&group->counter[cm_attr->index]));
3843 static const struct sysfs_ops cm_counter_ops = {
3844 .show = cm_show_counter
3847 static struct kobj_type cm_counter_obj_type = {
3848 .sysfs_ops = &cm_counter_ops,
3849 .default_attrs = cm_counter_default_attrs
3852 static void cm_release_port_obj(struct kobject *obj)
3854 struct cm_port *cm_port;
3856 cm_port = container_of(obj, struct cm_port, port_obj);
3860 static struct kobj_type cm_port_obj_type = {
3861 .release = cm_release_port_obj
3864 static char *cm_devnode(struct device *dev, umode_t *mode)
3866 if (mode)
3867 *mode = 0666;
3868 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
3871 struct class cm_class = {
3872 .owner = THIS_MODULE,
3873 .name = "infiniband_cm",
3874 .devnode = cm_devnode,
3876 EXPORT_SYMBOL(cm_class);
3878 static int cm_create_port_fs(struct cm_port *port)
3882 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3883 &port->cm_dev->device->kobj,
3884 "%d", port->port_num);
3890 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
3891 ret = kobject_init_and_add(&port->counter_group[i].obj,
3892 &cm_counter_obj_type,
3894 "%s", counter_group_names[i]);
3903 kobject_put(&port->counter_group[i].obj);
3904 kobject_put(&port->port_obj);
3909 static void cm_remove_port_fs(struct cm_port *port)
3913 for (i = 0; i < CM_COUNTER_GROUPS; i++)
3914 kobject_put(&port->counter_group[i].obj);
3916 kobject_put(&port->port_obj);
3919 static void cm_add_one(struct ib_device *ib_device)
3921 struct cm_device *cm_dev;
3922 struct cm_port *port;
3923 struct ib_mad_reg_req reg_req = {
3924 .mgmt_class = IB_MGMT_CLASS_CM,
3925 .mgmt_class_version = IB_CM_CLASS_VERSION,
3927 struct ib_port_modify port_modify = {
3928 .set_port_cap_mask = IB_PORT_CM_SUP
3930 unsigned long flags;
3935 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
3936 ib_device->phys_port_cnt, GFP_KERNEL);
3940 cm_dev->ib_device = ib_device;
3941 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
3942 cm_dev->going_down = 0;
3943 cm_dev->device = device_create(&cm_class, &ib_device->dev,
3945 "%s", ib_device->name);
3946 if (IS_ERR(cm_dev->device)) {
3951 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3952 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3953 if (!rdma_cap_ib_cm(ib_device, i))
3954 continue;
3956 port = kzalloc(sizeof *port, GFP_KERNEL);
3960 cm_dev->port[i-1] = port;
3961 port->cm_dev = cm_dev;
3964 INIT_LIST_HEAD(&port->cm_priv_prim_list);
3965 INIT_LIST_HEAD(&port->cm_priv_altr_list);
3967 ret = cm_create_port_fs(port);
3971 port->mad_agent = ib_register_mad_agent(ib_device, i,
3972 IB_QPT_GSI,
3973 &reg_req,
3974 0,
3975 cm_send_handler,
3976 cm_recv_handler,
3977 port,
3978 0);
3979 if (IS_ERR(port->mad_agent))
3980 goto error2;
3982 ret = ib_modify_port(ib_device, i, 0, &port_modify);
3992 ib_set_client_data(ib_device, &cm_client, cm_dev);
3994 write_lock_irqsave(&cm.device_lock, flags);
3995 list_add_tail(&cm_dev->list, &cm.device_list);
3996 write_unlock_irqrestore(&cm.device_lock, flags);
4000 ib_unregister_mad_agent(port->mad_agent);
4002 cm_remove_port_fs(port);
4004 port_modify.set_port_cap_mask = 0;
4005 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4007 if (!rdma_cap_ib_cm(ib_device, i))
4008 continue;
4010 port = cm_dev->port[i-1];
4011 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4012 ib_unregister_mad_agent(port->mad_agent);
4013 cm_remove_port_fs(port);
4016 device_unregister(cm_dev->device);
4020 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4022 struct cm_device *cm_dev = client_data;
4023 struct cm_port *port;
4024 struct cm_id_private *cm_id_priv;
4025 struct ib_mad_agent *cur_mad_agent;
4026 struct ib_port_modify port_modify = {
4027 .clr_port_cap_mask = IB_PORT_CM_SUP
4029 unsigned long flags;
4035 write_lock_irqsave(&cm.device_lock, flags);
4036 list_del(&cm_dev->list);
4037 write_unlock_irqrestore(&cm.device_lock, flags);
4039 spin_lock_irq(&cm.lock);
4040 cm_dev->going_down = 1;
4041 spin_unlock_irq(&cm.lock);
4043 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4044 if (!rdma_cap_ib_cm(ib_device, i))
4045 continue;
4047 port = cm_dev->port[i-1];
4048 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4049 /* Mark all cm_ids bound to this port as no longer ready to send */
4050 spin_lock_irq(&cm.lock);
4051 list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
4052 cm_id_priv->altr_send_port_not_ready = 1;
4053 list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
4054 cm_id_priv->prim_send_port_not_ready = 1;
4055 spin_unlock_irq(&cm.lock);
4057 * We flush the workqueue here, after going_down has been set; this
4058 * ensures that no new work will be queued by the receive handler,
4059 * after which it is safe to call ib_unregister_mad_agent().
4061 flush_workqueue(cm.wq);
4062 spin_lock_irq(&cm.state_lock);
4063 cur_mad_agent = port->mad_agent;
4064 port->mad_agent = NULL;
4065 spin_unlock_irq(&cm.state_lock);
4066 ib_unregister_mad_agent(cur_mad_agent);
4067 cm_remove_port_fs(port);
4070 device_unregister(cm_dev->device);
4074 static int __init ib_cm_init(void)
4078 memset(&cm, 0, sizeof cm);
4079 INIT_LIST_HEAD(&cm.device_list);
4080 rwlock_init(&cm.device_lock);
4081 spin_lock_init(&cm.lock);
4082 spin_lock_init(&cm.state_lock);
4083 cm.listen_service_table = RB_ROOT;
4084 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4085 cm.remote_id_table = RB_ROOT;
4086 cm.remote_qp_table = RB_ROOT;
4087 cm.remote_sidr_table = RB_ROOT;
4088 idr_init(&cm.local_id_table);
4089 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4090 INIT_LIST_HEAD(&cm.timewait_list);
4092 ret = class_register(&cm_class);
4093 if (ret) {
4094 ret = -ENOMEM;
4095 goto error1;
4096 }
4098 cm.wq = create_workqueue("ib_cm");
4099 if (!cm.wq) {
4100 ret = -ENOMEM;
4101 goto error2;
4102 }
4104 ret = ib_register_client(&cm_client);
4105 if (ret)
4106 goto error3;
4108 return 0;
4109 error3:
4110 destroy_workqueue(cm.wq);
4111 error2:
4112 class_unregister(&cm_class);
4113 error1:
4114 idr_destroy(&cm.local_id_table);
4115 return ret;
4118 static void __exit ib_cm_cleanup(void)
4120 struct cm_timewait_info *timewait_info, *tmp;
4122 spin_lock_irq(&cm.lock);
4123 list_for_each_entry(timewait_info, &cm.timewait_list, list)
4124 cancel_delayed_work(&timewait_info->work.work);
4125 spin_unlock_irq(&cm.lock);
4127 ib_unregister_client(&cm_client);
4128 destroy_workqueue(cm.wq);
4130 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4131 list_del(&timewait_info->list);
4132 kfree(timewait_info);
4135 class_unregister(&cm_class);
4136 idr_destroy(&cm.local_id_table);
4139 module_init_order(ib_cm_init, SI_ORDER_SECOND);
4140 module_exit_order(ib_cm_cleanup, SI_ORDER_FIRST);