/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (5 * HZ)
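/*
 * Overview (annotation, not from the original sources): with SR-IOV,
 * every guest ("slave") runs its own CM stack and picks communication
 * IDs that are only unique within that guest.  This file rewrites CM
 * MADs as they pass through the PF so that each (slave, sl_cm_id) pair
 * maps to a single paravirtualized ID (pv_cm_id) that is unique on the
 * wire, and reverses the mapping for incoming MADs.  As a hypothetical
 * example: slave 2 sends a REQ with local comm ID 0x10; the multiplex
 * path allocates pv_cm_id 0x7 and rewrites the MAD, so the wire only
 * ever sees 0x7, and replies carrying 0x7 are translated back and
 * steered to slave 2.  Mappings are dropped on DREP/REJ, or after
 * CM_CLEANUP_CACHE_TIMEOUT once a DREQ has been seen.
 */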
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};
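/*
 * Layout note (annotation): these structs are overlaid on a raw MAD
 * buffer, so the field offsets, not the names, are what matter.  The
 * two comm IDs sit immediately after the common MAD header, and in a
 * REQ the primary path SGID begins 0x60 bytes into the MAD, hence the
 * 0x60-byte pad.
 */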
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
	msg->local_comm_id = cpu_to_be32(cm_id);
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->local_comm_id);
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
	msg->remote_comm_id = cpu_to_be32(cm_id);
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;

	return be32_to_cpu(msg->remote_comm_id);
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}
/* Caller must hold sriov->id_map_lock */
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}

	return NULL;
}
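/*
 * The tree is keyed on (sl_cm_id, slave_id) with sl_cm_id as the major
 * key: the walk branches on sl_cm_id first and only compares slave_id
 * on a tie, so entries with the same slave-local comm ID from different
 * slaves coexist in the same tree.
 */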
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *db_ent, *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	int pv_id = (int) ent->pv_cm_id;

	spin_lock(&sriov->id_map_lock);
	db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
	if (!db_ent)
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_id);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}
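/*
 * Note (annotation): id_map_find_del() may have raced with this timeout
 * and already detached the entry from the idr and the rb-tree.  The idr
 * lookup above detects that case, so the work item then only unlinks
 * the entry from cm_list and frees it; the double lookup under
 * id_map_lock is what keeps the rb_erase() safe.
 */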
static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct id_map_entry *ent, *found_ent;

	spin_lock(&sriov->id_map_lock);
	ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
	if (!ent)
		goto out;
	found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_cm_id);
out:
	spin_unlock(&sriov->id_map_lock);
}
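/*
 * Unlike the timeout path, explicit deletion leaves the entry on
 * cm_list and does not free it: a delayed cleanup work item may still
 * be pending for the DREQ side, so the final kfree() is left to
 * id_map_ent_timeout() or to mlx4_ib_cm_paravirt_clean().
 */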
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}
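/*
 * The comparator here mirrors the ordering used by
 * id_map_find_by_sl_id(), so insert and lookup agree on tree shape.
 * An existing (slave_id, sl_cm_id) node is replaced rather than
 * duplicated, which keeps lookups unambiguous if a slave reuses a
 * comm ID.
 */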
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret, id;
	static int next_id;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
	if (!ent) {
		mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
		return ERR_PTR(-ENOMEM);
	}

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	do {
		spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
		ret = idr_get_new_above(&sriov->pv_id_table, ent,
					next_id, &id);
		if (!ret) {
			next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
			ent->pv_cm_id = (u32)id;
			sl_id_map_add(ibdev, ent);
		}

		spin_unlock(&sriov->id_map_lock);
	} while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
	/* idr_get_new_above() can also return -ENOSPC; don't insert in that case. */
	if (!ret) {
		spin_lock(&sriov->id_map_lock);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}
	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
	return ERR_PTR(-ENOMEM);
}
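/*
 * The loop above is the pre-3.9 idr idiom: idr_get_new_above() fails
 * with -EAGAIN when the preallocated nodes run out, so idr_pre_get()
 * refills them outside the spinlock and the allocation is retried.
 * Advancing next_id past the last allocated ID (masked to the valid
 * IDR range) hands out IDs cyclically, so a recently freed pv_cm_id is
 * not reused immediately.  On later kernels the whole dance collapses
 * into a single call; a sketch, not what this version compiles against:
 *
 *	id = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_KERNEL);
 *	if (id >= 0)
 *		ent->pv_cm_id = (u32)id;
 */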
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int sl_cm_id, int slave_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, sl_cm_id, slave_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}
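/*
 * Two lookup modes (annotation): a caller passing *pv_cm_id == -1 knows
 * only the slave-local ID, so the rb-tree resolves it and the
 * paravirtualized ID is written back through the pointer; otherwise
 * pv_cm_id is authoritative and the idr is consulted directly.  Note
 * that the formal parameters here are named in the opposite order to
 * the arguments the callers pass, and the call to
 * id_map_find_by_sl_id() swaps them again, so the two inversions cancel
 * out; confusing, but not a bug.
 */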
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);
	/* don't arm the cleanup work once teardown has started */
	if (!sriov->is_going_down) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}
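/*
 * Teardown interlock (annotation): going_down_lock and is_going_down
 * are owned by the sriov shutdown path.  Checking the flag with both
 * locks held guarantees that no new cleanup work is queued once
 * mlx4_ib_cm_paravirt_clean() has started cancelling and flushing, so
 * a flushed work item cannot re-arm itself.
 */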
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
		struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	sl_cm_id = get_local_comm_id(mad);

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
		return 0;
	} else {
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
			 slave_id, sl_cm_id);
		return -EINVAL;
	}

	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
		id_map_find_del(ibdev, pv_cm_id);

	return 0;
}
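/*
 * Multiplex direction summary (annotation): this handler sees MADs a
 * slave is sending toward the wire.  REQ/REP establish a new mapping,
 * a slave-originated REJ passes through untranslated in this version,
 * and everything else reuses an existing mapping; the slave's local
 * comm ID is then overwritten with pv_cm_id.  A DREQ arms the delayed
 * cleanup, while a DREP deletes the mapping immediately.
 */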
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad, int is_eth)
{
	u32 pv_cm_id;
	struct id_map_entry *id;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
		union ib_gid gid;

		if (is_eth)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     gid.global.interface_id);
			return -ENOENT;
		}
		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
		return -ENOENT;
	}

	if (!is_eth)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		 mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
		id_map_find_del(ibdev, (int) pv_cm_id);
	}

	return 0;
}
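/*
 * Demux direction summary (annotation): for MADs arriving from the
 * wire, a REQ carries no pv_cm_id mapping yet, so the target slave is
 * resolved from the REQ's primary path SGID instead.  All other MADs
 * carry our pv_cm_id in the remote comm ID field, which is translated
 * back to the slave-local ID and the owning slave.  DREQ again arms
 * delayed cleanup; REJ and DREP delete the mapping.  The is_eth
 * branches (skipping GID-based slave resolution for RoCE) follow the
 * Mellanox OFED variant of this file.
 */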
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	idr_init(&dev->sriov.pv_id_table);
	idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
}
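/*
 * Annotation for the cleanup path below: mlx4_ib_cm_paravirt_clean()
 * first cancels (or, failing that, flushes) every delayed cleanup work
 * item for the targeted slave(s), then purges the matching entries from
 * both the rb-tree and the idr under id_map_lock, and finally frees the
 * collected entries outside the lock.  The idr_pre_get() call in init
 * above simply preallocates idr nodes so the first id_map_alloc()
 * attempt can succeed without sleeping inside the spinlock.
 */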
/* slave = -1 ==> all slaves */
/* TBD: call paravirt clean for a single slave; needed for the slave RESET event */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 1;
	struct id_map_entry *map, *tmp_map;
	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush &= !!cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (!need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}
}