/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/***************************************************************/
/* This file supports the handling of the Alias GUID feature.  */
/***************************************************************/
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <dev/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <linux/delay.h>
#include "mlx4_ib.h"
/*
 * The driver keeps the current state of all GUIDs as they are in the HW.
 * Whenever an SMP MAD GUIDInfo record is received, the data is cached.
 */
struct mlx4_alias_guid_work_context {
        u8 port;
        struct mlx4_ib_dev *dev;
        struct ib_sa_query *sa_query;
        struct completion done;
        int query_id;
        struct list_head list;
        int block_num;
        u8 method;
};
struct mlx4_next_alias_guid_work {
        u8 port;
        u8 block_num;
        struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};
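
/*
 * Cache the alias GUIDs carried in one GUIDInfo record block. Only
 * indexes whose bit is set in the block's guid_indexes mask are copied
 * into the per-port guid_cache; each index corresponds to one slave.
 */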
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
                                         u8 port_num, u8 *p_data)
{
        int i;
        u64 guid_indexes;
        int slave_id;
        int port_index = port_num - 1;

        if (!mlx4_is_master(dev->dev))
                return;

        guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
                                   ports_guid[port_num - 1].
                                   all_rec_per_port[block_num].guid_indexes);
        pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
                 (unsigned long long)guid_indexes);

        for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
                /* The location of the specific index starts from bit number 4
                 * until bit number 11 */
                if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
                        slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
                        if (slave_id >= dev->dev->num_slaves) {
                                pr_debug("The last slave: %d\n", slave_id);
                                return;
                        }

                        /* cache the guid: */
                        memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
                               &p_data[i * GUID_REC_SIZE],
                               GUID_REC_SIZE);
                } else
                        pr_debug("Guid number: %d in block: %d"
                                 " was not updated\n", i, block_num);
        }
}
static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
{
        if (index >= NUM_ALIAS_GUID_PER_PORT) {
                pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
                return (__force __be64) -1;
        }
        return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
}
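
/*
 * GUIDs in a GUIDInfo record occupy bits 4..11 of the SA component mask
 * (the first four mask bits cover the LID, block number and reserved
 * fields), so GUID index 0 maps to IB_SA_COMP_MASK(4), index 1 to
 * IB_SA_COMP_MASK(5), and so on. This matches the test_bit(i + 4, ...)
 * checks above.
 */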
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
{
        return IB_SA_COMP_MASK(4 + index);
}
/*
 * Whenever a new GUID is set/unset (guid table change), create an event and
 * notify the relevant slave (the master should also be notified).
 * If the GUID value is not the same as we have in the cache, the slave will
 * not be updated; in that case it waits for the smp_snoop or the port
 * management event to call this function and update the slave.
 * block_number - the index of the block (16 blocks available)
 * port_number - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
                                          int block_num, u8 port_num,
                                          u8 *p_data)
{
        int i;
        u64 guid_indexes;
        int slave_id;
        enum slave_port_state new_state;
        enum slave_port_state prev_state;
        __be64 tmp_cur_ag, form_cache_ag;
        enum slave_port_gen_event gen_event;

        if (!mlx4_is_master(dev->dev))
                return;

        guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
                                   ports_guid[port_num - 1].
                                   all_rec_per_port[block_num].guid_indexes);
        pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
                 (unsigned long long)guid_indexes);

        /* calculate the slaves and notify them */
        for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
                /* the location of the specific index runs from bits 4..11 */
                if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
                        continue;

                slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
                if (slave_id >= dev->dev->num_slaves)
                        return;
                tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
                form_cache_ag = get_cached_alias_guid(dev, port_num,
                                        (NUM_ALIAS_GUID_IN_REC * block_num) + i);
                /*
                 * Check if the guid is not the same as in the cache.
                 * If it is different, wait for the snoop_smp or the port mgmt
                 * change event to update the slave on its port state change.
                 */
                if (tmp_cur_ag != form_cache_ag)
                        continue;
                mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);

                /* 2 cases: Valid GUID, and Invalid GUID */

                if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /* valid GUID */
                        prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
                        new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
                                                MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
                                                &gen_event);
                        pr_debug("slave: %d, port: %d prev_port_state: %d,"
                                 " new_port_state: %d, gen_event: %d\n",
                                 slave_id, port_num, prev_state, new_state, gen_event);
                        if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
                                pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
                                         slave_id, port_num);
                                mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
                                        port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
                        }
                } else { /* request to invalidate GUID */
                        set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
                                                      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
                                                      &gen_event);
                        pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
                                 slave_id, port_num);
                        mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
                                                       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
                }
        }
}
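
/*
 * Completion handler for the SA GuidInfoRecord query issued by
 * set_guid_rec(): records the GUIDs the SM actually granted, re-arms
 * declined driver-assigned entries so they are requested again, notifies
 * the slaves, and queues the per-port work to handle the next pending
 * record.
 */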
static void aliasguid_query_handler(int status,
                                    struct ib_sa_guidinfo_rec *guid_rec,
                                    void *context)
{
        struct mlx4_ib_dev *dev;
        struct mlx4_alias_guid_work_context *cb_ctx = context;
        u8 port_index;
        int i;
        struct mlx4_sriov_alias_guid_info_rec_det *rec;
        unsigned long flags, flags1;

        if (!context)
                return;

        dev = cb_ctx->dev;
        port_index = cb_ctx->port - 1;
        rec = &dev->sriov.alias_guid.ports_guid[port_index].
                all_rec_per_port[cb_ctx->block_num];

        if (status) {
                rec->status = MLX4_GUID_INFO_STATUS_IDLE;
                pr_debug("(port: %d) failed: status = %d\n",
                         cb_ctx->port, status);
                goto out;
        }

        if (guid_rec->block_num != cb_ctx->block_num) {
                pr_err("block num mismatch: %d != %d\n",
                       cb_ctx->block_num, guid_rec->block_num);
                goto out;
        }

        pr_debug("lid/port: %d/%d, block_num: %d\n",
                 be16_to_cpu(guid_rec->lid), cb_ctx->port,
                 guid_rec->block_num);

        rec = &dev->sriov.alias_guid.ports_guid[port_index].
                all_rec_per_port[guid_rec->block_num];

        rec->status = MLX4_GUID_INFO_STATUS_SET;
        rec->method = MLX4_GUID_INFO_RECORD_SET;

        for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
                __be64 tmp_cur_ag;

                tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
                if ((cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE)
                    && (MLX4_NOT_SET_GUID == tmp_cur_ag)) {
                        pr_debug("%s: Record num %d in block_num: %d "
                                 "was deleted by SM, ownership by %d "
                                 "(0 = driver, 1 = sysAdmin, 2 = None)\n",
                                 __func__, i, guid_rec->block_num,
                                 rec->ownership);
                        rec->guid_indexes = rec->guid_indexes &
                                ~mlx4_ib_get_aguid_comp_mask_from_ix(i);
                        continue;
                }

                /* Check if the SM didn't assign one of the records.
                 * If it didn't, and it was not a sysadmin request,
                 * ask the SM to give a new GUID (instead of the driver request).
                 */
                if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
                        mlx4_ib_warn(&dev->ib_dev, "%s: Record num %d in "
                                     "block_num: %d was declined by SM, "
                                     "ownership by %d (0 = driver, 1 = sysAdmin,"
                                     " 2 = None)\n", __func__, i,
                                     guid_rec->block_num, rec->ownership);
                        if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
                                /* if it is driver assign, ask for a new GUID from the SM */
                                *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
                                        MLX4_NOT_SET_GUID;

                                /* Mark the record as not assigned, and let it
                                 * be sent again in the next work sched. */
                                rec->status = MLX4_GUID_INFO_STATUS_IDLE;
                                rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
                        }
                } else {
                        /* properly assigned record. */
                        /* We save the GUID we just got from the SM in the
                         * admin_guid in order to be persistent; in the next
                         * request to the SM the process will ask for the same GUID */
                        if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
                            tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
                                /* the sysadmin assignment failed. */
                                mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
                                             " admin guid after SysAdmin "
                                             "configuration. "
                                             "Record num %d in block_num: %d "
                                             "was declined by SM, "
                                             "new val(0x%llx) was kept\n",
                                             __func__, i,
                                             guid_rec->block_num,
                                             (long long)be64_to_cpu(*(__be64 *) &
                                                rec->all_recs[i * GUID_REC_SIZE]));
                        } else {
                                memcpy(&rec->all_recs[i * GUID_REC_SIZE],
                                       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
                                       GUID_REC_SIZE);
                        }
                }
        }
        /*
         * The function is called here to cover the case where the SM
         * doesn't send an SMP; the driver then updates the slaves from
         * the SA response.
         */
        mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
                                             cb_ctx->port,
                                             guid_rec->guid_info_list);
out:
        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        if (!dev->sriov.is_going_down)
                queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
                                   &dev->sriov.alias_guid.ports_guid[port_index].
                                   alias_guid_work, 0);
        if (cb_ctx->sa_query) {
                list_del(&cb_ctx->list);
                kfree(cb_ctx);
        } else
                complete(&cb_ctx->done);
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
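
/*
 * Reset one GUIDInfo record to IDLE and recompute which of its entries
 * should be (re)submitted to the SM, encoded as an SA component mask in
 * guid_indexes.
 */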
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
        int i;
        u64 cur_admin_val;
        ib_sa_comp_mask comp_mask = 0;

        dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
                = MLX4_GUID_INFO_STATUS_IDLE;
        dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
                = MLX4_GUID_INFO_RECORD_SET;

        /* calculate the comp_mask for that record. */
        for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
                cur_admin_val =
                        *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
                        all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
                /*
                 * Check the admin value: if it is for delete (~0ULL), or
                 * it is the first guid of the first record (the hw guid), or
                 * the record is not owned by the sysadmin and the sm doesn't
                 * need to assign GUIDs, then don't put it up for assignment.
                 */
                if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
                    (!index && !i) ||
                    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
                    ports_guid[port - 1].all_rec_per_port[index].ownership)
                        continue;
                comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
        }
        dev->sriov.alias_guid.ports_guid[port - 1].
                all_rec_per_port[index].guid_indexes = comp_mask;
}
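
/*
 * Build an SA GuidInfoRecord from @rec_det and send it to the SM via
 * ib_sa_guid_info_rec_query(). On failure (port not active, no memory,
 * or the query could not be posted) the record is invalidated and the
 * per-port work is rescheduled to retry later.
 */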
static int set_guid_rec(struct ib_device *ibdev,
                        u8 port, int index,
                        struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
        int err;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct ib_sa_guidinfo_rec guid_info_rec;
        ib_sa_comp_mask comp_mask;
        struct ib_port_attr attr;
        struct mlx4_alias_guid_work_context *callback_context;
        unsigned long resched_delay, flags, flags1;
        struct list_head *head =
                &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

        err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
        if (err) {
                pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
                         err, port);
                return err;
        }
        /* check the port was configured by the sm, otherwise no need to send */
        if (attr.state != IB_PORT_ACTIVE) {
                pr_debug("port %d not active...rescheduling\n", port);
                resched_delay = 5 * HZ;
                err = -EAGAIN;
                goto new_schedule;
        }

        callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
        if (!callback_context) {
                err = -ENOMEM;
                resched_delay = HZ * 5;
                goto new_schedule;
        }
        callback_context->port = port;
        callback_context->dev = dev;
        callback_context->block_num = index;
        callback_context->method = rec_det->method;

        memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

        guid_info_rec.lid = cpu_to_be16(attr.lid);
        guid_info_rec.block_num = index;

        memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
               GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
        comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
                rec_det->guid_indexes;

        init_completion(&callback_context->done);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        list_add_tail(&callback_context->list, head);
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

        callback_context->query_id =
                ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
                                          ibdev, port, &guid_info_rec,
                                          comp_mask, rec_det->method, 1000,
                                          GFP_KERNEL, aliasguid_query_handler,
                                          callback_context,
                                          &callback_context->sa_query);
        if (callback_context->query_id < 0) {
                pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
                         "%d. will reschedule to the next 1 sec.\n",
                         callback_context->query_id);
                spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
                list_del(&callback_context->list);
                kfree(callback_context);
                spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
                resched_delay = 1 * HZ;
                err = -EAGAIN;
                goto new_schedule;
        }
        err = 0;
        goto out;

new_schedule:
        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        invalidate_guid_record(dev, port, index);
        if (!dev->sriov.is_going_down) {
                queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
                                   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
                                   resched_delay);
        }
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
        return err;
}
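
/*
 * Invalidate all GUIDInfo records of a port and kick the per-port work
 * so that every block is renegotiated with the SM.
 */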
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
        int i;
        unsigned long flags, flags1;

        pr_debug("port %d\n", port);

        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
                invalidate_guid_record(dev, port, i);

        if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
                /*
                 * Make sure no work waits in the queue. If the work is
                 * already queued (not on the timer) the cancel will fail;
                 * that is not a problem because we just want the work started.
                 */
                cancel_delayed_work(&dev->sriov.alias_guid.
                                    ports_guid[port - 1].alias_guid_work);
                queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
                                   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
                                   0);
        }
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
/* The function returns the next record that was
 * not configured (or failed to be configured) */
static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
                                     struct mlx4_next_alias_guid_work *rec)
{
        int j;
        unsigned long flags;

        for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
                spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
                if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
                    MLX4_GUID_INFO_STATUS_IDLE) {
                        memcpy(&rec->rec_det,
                               &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
                               sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
                        rec->port = port;
                        rec->block_num = j;
                        dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
                                MLX4_GUID_INFO_STATUS_PENDING;
                        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
                        return 0;
                }
                spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
        }
        return -ENOENT;
}
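
/* Copy an administratively configured record (component mask, GUIDs and
 * status) into the port's all_rec_per_port table. */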
static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
                                             int rec_index,
                                             struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
        dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
                rec_det->guid_indexes;
        memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
               rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
        dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
                rec_det->status;
}
static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
{
        int j;
        struct mlx4_sriov_alias_guid_info_rec_det rec_det;

        for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
                memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
                rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
                        IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
                        IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
                        IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
                        IB_SA_GUIDINFO_REC_GID7;
                rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
                set_administratively_guid_record(dev, port, j, &rec_det);
        }
}
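
/*
 * Per-port delayed work: pick the next IDLE record with
 * get_next_record_to_update() and push it to the SM via set_guid_rec().
 * The work is re-queued from the SA query callback until no IDLE records
 * remain.
 */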
static void alias_guid_work(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        int ret = 0;
        struct mlx4_next_alias_guid_work *rec;
        struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
                container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
                             alias_guid_work);
        struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
        struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
                                                      struct mlx4_ib_sriov,
                                                      alias_guid);
        struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);

        rec = kzalloc(sizeof *rec, GFP_KERNEL);
        if (!rec) {
                pr_err("alias_guid_work: No Memory\n");
                return;
        }
        pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
        ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
        if (ret) {
                pr_debug("No more records to update.\n");
                goto out;
        }
        set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
                     &rec->rec_det);
out:
        kfree(rec);
}
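
/* Kick off the alias GUID state machine for one port (master only). */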
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
        unsigned long flags, flags1;

        if (!mlx4_is_master(dev->dev))
                return;
        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        if (!dev->sriov.is_going_down) {
                queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
                                   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
        }
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
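
/*
 * Tear down the alias GUID service: cancel the per-port work, cancel any
 * in-flight SA queries and wait for their completions, then destroy the
 * workqueues and unregister the SA client.
 */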
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
        int i;
        struct mlx4_ib_sriov *sriov = &dev->sriov;
        struct mlx4_alias_guid_work_context *cb_ctx;
        struct mlx4_sriov_alias_guid_port_rec_det *det;
        struct ib_sa_query *sa_query;
        unsigned long flags;

        for (i = 0; i < dev->num_ports; i++) {
                cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
                det = &sriov->alias_guid.ports_guid[i];
                spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
                while (!list_empty(&det->cb_list)) {
                        cb_ctx = list_entry(det->cb_list.next,
                                            struct mlx4_alias_guid_work_context,
                                            list);
                        sa_query = cb_ctx->sa_query;
                        cb_ctx->sa_query = NULL;
                        list_del(&cb_ctx->list);
                        spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
                        ib_sa_cancel_query(cb_ctx->query_id, sa_query);
                        wait_for_completion(&cb_ctx->done);
                        kfree(cb_ctx);
                        spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
                }
                spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
        }
        for (i = 0; i < dev->num_ports; i++) {
                flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
                destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
        }
        ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
        kfree(dev->sriov.alias_guid.sa_client);
}
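
/*
 * Initialize the alias GUID service on the master: register an SA
 * client, set per-record ownership (driver-assigned when
 * mlx4_ib_sm_guid_assign is set, otherwise none-assigned with all values
 * marked deleted until the sysadmin provides them), and create one
 * single-threaded workqueue per port running alias_guid_work.
 */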
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
        char alias_wq_name[15];
        int ret = 0;
        int i, j, k;
        union ib_gid gid;

        if (!mlx4_is_master(dev->dev))
                return 0;
        dev->sriov.alias_guid.sa_client =
                kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
        if (!dev->sriov.alias_guid.sa_client)
                return -ENOMEM;

        ib_sa_register_client(dev->sriov.alias_guid.sa_client);

        spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

        for (i = 1; i <= dev->num_ports; ++i) {
                if (dev->ib_dev.query_gid(&dev->ib_dev, i, 0, &gid)) {
                        ret = -EFAULT;
                        goto err_unregister;
                }
        }

        for (i = 0; i < dev->num_ports; i++) {
                memset(&dev->sriov.alias_guid.ports_guid[i], 0,
                       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
                /* Check if the SM doesn't need to assign the GUIDs */
                for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
                        if (mlx4_ib_sm_guid_assign) {
                                dev->sriov.alias_guid.ports_guid[i].
                                        all_rec_per_port[j].
                                        ownership = MLX4_GUID_DRIVER_ASSIGN;
                                continue;
                        }
                        dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
                                ownership = MLX4_GUID_NONE_ASSIGN;
                        /* mark each val as if it was deleted,
                         * till the sysAdmin gives it a valid val */
                        for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
                                *(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
                                        all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
                                        cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
                        }
                }
                INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
                /* prepare the records, set them to be allocated by the sm */
                for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
                        invalidate_guid_record(dev, i + 1, j);

                dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
                dev->sriov.alias_guid.ports_guid[i].port = i;
                if (mlx4_ib_sm_guid_assign)
                        set_all_slaves_guids(dev, i);

                snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
                dev->sriov.alias_guid.ports_guid[i].wq =
                        create_singlethread_workqueue(alias_wq_name);
                if (!dev->sriov.alias_guid.ports_guid[i].wq) {
                        ret = -ENOMEM;
                        goto err_thread;
                }
                INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
                                  alias_guid_work);
        }
        return 0;

err_thread:
        for (--i; i >= 0; i--) {
                destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
                dev->sriov.alias_guid.ports_guid[i].wq = NULL;
        }

err_unregister:
        ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
        kfree(dev->sriov.alias_guid.sa_client);
        dev->sriov.alias_guid.sa_client = NULL;
        pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
        return ret;
}