/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*************************************************************/
/* This file supports the handling of the Alias GUID feature.*/
/*************************************************************/
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <linux/delay.h>
#include "mlx4_ib.h"
/*
 * The driver keeps the current state of all GUIDs, as they are in the HW.
 * Whenever we receive an SMP MAD GUIDInfo record, the data will be cached.
 */
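/*
 * Layout note: per the definitions in mlx4_ib.h, each GUIDInfo record
 * (block) holds NUM_ALIAS_GUID_IN_REC GUIDs of GUID_REC_SIZE bytes each,
 * and every port exposes NUM_ALIAS_GUID_REC_IN_PORT such blocks. An alias
 * GUID index is therefore block_num * NUM_ALIAS_GUID_IN_REC plus the index
 * within the record, which is also how slave IDs are derived below.
 */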
struct mlx4_alias_guid_work_context {
	u8 port;
	struct mlx4_ib_dev	*dev;
	struct ib_sa_query	*sa_query;
	struct completion	done;
	int			query_id;
	struct list_head	list;
	int			block_num;
};
struct mlx4_next_alias_guid_work {
	u8 port;
	u8 block_num;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
					 u8 port_num, u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	int port_index = port_num - 1;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
		 (long long)guid_indexes);

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* The location of the specific index starts from bit number 4
		 * until bit num 11 */
		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
			if (slave_id >= dev->dev->num_slaves) {
				pr_debug("The last slave: %d\n", slave_id);
				return;
			}

			/* cache the guid: */
			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
			       &p_data[i * GUID_REC_SIZE],
			       GUID_REC_SIZE);
		} else
			pr_debug("Guid number: %d in block: %d"
				 " was not updated\n", i, block_num);
	}
}
static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
{
	if (index >= NUM_ALIAS_GUID_PER_PORT) {
		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
		return (__force __be64) -1;
	}
	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
}
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
{
	return IB_SA_COMP_MASK(4 + index);
}
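/*
 * Worked example: IB_SA_COMP_MASK(n) expands to cpu_to_be64(1ULL << n),
 * so for alias GUID index 3 this helper returns the mask with bit 7 set,
 * the same bit probed by the test_bit(i + 4, ...) lookups used when
 * walking guid_indexes elsewhere in this file.
 */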
/*
 * Whenever a new GUID is set/unset (guid table change), create an event and
 * notify the relevant slave (the master should also be notified).
 * If the GUID value is not as we have in the cache the slave will not be
 * updated; in this case it waits for the smp_snoop or the port management
 * event to call the function and to update the slave.
 * block_number - the index of the block (16 blocks available)
 * port_number - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num, u8 port_num,
					  u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	enum slave_port_state new_state;
	enum slave_port_state prev_state;
	__be64 tmp_cur_ag, form_cache_ag;
	enum slave_port_gen_event gen_event;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
		 (long long)guid_indexes);

	/* calculate the slaves and notify them */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* the location of the specific index runs from bits 4..11 */
		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
			continue;

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
		if (slave_id >= dev->dev->num_slaves)
			return;
		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
		form_cache_ag = get_cached_alias_guid(dev, port_num,
					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
		/*
		 * Check if the guid is not the same as in the cache.
		 * If it is different, wait for the snoop_smp or the port mgmt
		 * change event to update the slave on its port state change.
		 */
		if (tmp_cur_ag != form_cache_ag)
			continue;
		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);

		/* 2 cases: Valid GUID, and Invalid GUID */

		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /* valid GUID */
			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
								  &gen_event);
			pr_debug("slave: %d, port: %d prev_port_state: %d,"
				 " new_port_state: %d, gen_event: %d\n",
				 slave_id, port_num, prev_state, new_state, gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
			}
		} else { /* request to invalidate GUID */
			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
						      &gen_event);
			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
				 slave_id, port_num);
			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
		}
	}
}
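/*
 * Illustrative flow for the function above: if the SA returns a valid GUID
 * for slave 9 on port 1 (block 1, index 1) and the SMP cache already holds
 * the same value, a GUID-change EQE is raised for slave 9, and a PORT_UP
 * (active) event follows if the slave port state machine reports
 * SLAVE_PORT_GEN_EVENT_UP.
 */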
static void aliasguid_query_handler(int status,
				    struct ib_sa_guidinfo_rec *guid_rec,
				    void *context)
{
	struct mlx4_ib_dev *dev;
	struct mlx4_alias_guid_work_context *cb_ctx = context;
	u8 port_index;
	int i;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags, flags1;

	if (!context)
		return;

	dev = cb_ctx->dev;
	port_index = cb_ctx->port - 1;
	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[cb_ctx->block_num];

	if (status) {
		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
		pr_debug("(port: %d) failed: status = %d\n",
			 cb_ctx->port, status);
		goto out;
	}

	if (guid_rec->block_num != cb_ctx->block_num) {
		pr_err("block num mismatch: %d != %d\n",
		       cb_ctx->block_num, guid_rec->block_num);
		goto out;
	}

	pr_debug("lid/port: %d/%d, block_num: %d\n",
		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
		 guid_rec->block_num);

	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[guid_rec->block_num];

	rec->status = MLX4_GUID_INFO_STATUS_SET;
	rec->method = MLX4_GUID_INFO_RECORD_SET;

	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
		__be64 tmp_cur_ag;

		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
		/* check if the SM didn't assign one of the records.
		 * if it didn't, and it was not a sysadmin request:
		 * ask the SM to give a new GUID (instead of the driver request).
		 */
		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
				     "block_num: %d was declined by SM, "
				     "ownership by %d (0 = driver, 1=sysAdmin,"
				     " 2=None)\n", __func__, i,
				     guid_rec->block_num, rec->ownership);
			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
				/* if it is driver assign, ask for a new GUID from the SM */
				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
					MLX4_NOT_SET_GUID;

				/* Mark the record as not assigned, and let it
				 * be sent again in the next work sched. */
				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
			}
		} else {
			/* properly assigned record. */
			/* We save the GUID we just got from the SM in the
			 * admin_guid in order to be persistent, and in the
			 * request from the sm the process will ask for the same GUID */
			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
				/* the sysadmin assignment failed. */
				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
					     " admin guid after SysAdmin "
					     "configuration. "
					     "Record num %d in block_num:%d "
					     "was declined by SM, "
					     "new val(0x%llx) was kept\n",
					     __func__, i,
					     guid_rec->block_num,
					     (long long)be64_to_cpu(*(__be64 *) &
					     rec->all_recs[i * GUID_REC_SIZE]));
			} else {
				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
				       GUID_REC_SIZE);
			}
		}
	}
	/*
	 * The function is called here to close the case where the SM doesn't
	 * send an SMP; based on the SA response the driver notifies the slaves.
	 */
	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
					     cb_ctx->port,
					     guid_rec->guid_info_list);
out:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down)
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
				   &dev->sriov.alias_guid.ports_guid[port_index].
				   alias_guid_work, 0);
	if (cb_ctx->sa_query) {
		list_del(&cb_ctx->list);
		kfree(cb_ctx);
	} else
		complete(&cb_ctx->done);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
	int i;
	u64 cur_admin_val;
	ib_sa_comp_mask comp_mask = 0;

	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
		= MLX4_GUID_INFO_STATUS_IDLE;
	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
		= MLX4_GUID_INFO_RECORD_SET;

	/* calculate the comp_mask for that record. */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		cur_admin_val =
			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
		/*
		 * Check the admin value: if it's for delete (~00LL) or
		 * it is the first guid of the first record (hw guid) or
		 * the record is not in the ownership of the sysadmin and the SM
		 * doesn't need to assign GUIDs, then don't put it up for assignment.
		 */
		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
		    (!index && !i) ||
		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
		    ports_guid[port - 1].all_rec_per_port[index].ownership)
			continue;
		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
	}
	dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].guid_indexes = comp_mask;
}
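/*
 * Worked example for invalidate_guid_record(): if, for a sysadmin-owned
 * block 0, only the admin GUID at index 2 holds a real value (index 0 is
 * the HW GUID and the rest are MLX4_GUID_FOR_DELETE_VAL), the resulting
 * guid_indexes is IB_SA_COMP_MASK(6), i.e. only GUID 2 of the record is
 * put up for SM assignment.
 */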
static int set_guid_rec(struct ib_device *ibdev,
			u8 port, int index,
			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
	int err;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_guidinfo_rec guid_info_rec;
	ib_sa_comp_mask comp_mask;
	struct ib_port_attr attr;
	struct mlx4_alias_guid_work_context *callback_context;
	unsigned long resched_delay, flags, flags1;
	struct list_head *head =
		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
	if (err) {
		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
			 err, port);
		return err;
	}
	/* check the port was configured by the SM, otherwise no need to send */
	if (attr.state != IB_PORT_ACTIVE) {
		pr_debug("port %d not active...rescheduling\n", port);
		resched_delay = 5 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
	if (!callback_context) {
		err = -ENOMEM;
		resched_delay = HZ * 5;
		goto new_schedule;
	}
	callback_context->port = port;
	callback_context->dev = dev;
	callback_context->block_num = index;

	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

	guid_info_rec.lid = cpu_to_be16(attr.lid);
	guid_info_rec.block_num = index;

	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
		rec_det->guid_indexes;

	init_completion(&callback_context->done);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	list_add_tail(&callback_context->list, head);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

	callback_context->query_id =
		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
					  ibdev, port, &guid_info_rec,
					  comp_mask, rec_det->method, 1000,
					  GFP_KERNEL, aliasguid_query_handler,
					  callback_context,
					  &callback_context->sa_query);
	if (callback_context->query_id < 0) {
		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
			 "%d. will reschedule to the next 1 sec.\n",
			 callback_context->query_id);
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
		list_del(&callback_context->list);
		kfree(callback_context);
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
		resched_delay = 1 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	err = 0;
	goto out;

new_schedule:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	invalidate_guid_record(dev, port, index);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   resched_delay);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
	return err;
}
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
	int i;
	unsigned long flags, flags1;

	pr_debug("port %d\n", port);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
		invalidate_guid_record(dev, port, i);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
		/*
		 * Make sure no work waits in the queue. If the work is already
		 * queued (not on the timer) the cancel will fail; that is not
		 * a problem because we just want the work started.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.
				    ports_guid[port - 1].alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
/* The function returns the next record that was
 * not configured (or failed to be configured) */
static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
				     struct mlx4_next_alias_guid_work *rec)
{
	int j;
	unsigned long flags;

	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
		    MLX4_GUID_INFO_STATUS_IDLE) {
			memcpy(&rec->rec_det,
			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
			rec->port = port;
			rec->block_num = j;
			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
				MLX4_GUID_INFO_STATUS_PENDING;
			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
			return 0;
		}
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	}
	return -ENOENT;
}
static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
					     int rec_index,
					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
		rec_det->guid_indexes;
	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
		rec_det->status;
}
static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
{
	int j;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;

	for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
			IB_SA_GUIDINFO_REC_GID7;
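		/*
		 * GID 0 is excluded for the first block only: index 0 of
		 * record 0 is the HW GUID, which invalidate_guid_record()
		 * likewise never puts up for SM assignment.
		 */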
		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
		set_administratively_guid_record(dev, port, j, &rec_det);
	}
}
static void alias_guid_work(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	int ret = 0;
	struct mlx4_next_alias_guid_work *rec;
	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
			     alias_guid_work);
	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
						      struct mlx4_ib_sriov,
						      alias_guid);
	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);

	rec = kzalloc(sizeof *rec, GFP_KERNEL);
	if (!rec) {
		pr_err("alias_guid_work: No Memory\n");
		return;
	}

	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
	if (ret) {
		pr_debug("No more records to update.\n");
		goto out;
	}

	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
		     &rec->rec_det);

out:
	kfree(rec);
}
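/*
 * Scheduling note: each alias_guid_work() pass configures at most one
 * record; aliasguid_query_handler() requeues the work when a query
 * completes, so a port's records are walked one SA query at a time.
 */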
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	if (!mlx4_is_master(dev->dev))
		return;
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
				   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
	int i;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct mlx4_alias_guid_work_context *cb_ctx;
	struct mlx4_sriov_alias_guid_port_rec_det *det;
	struct ib_sa_query *sa_query;
	unsigned long flags;

	for (i = 0 ; i < dev->num_ports; i++) {
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
		det = &sriov->alias_guid.ports_guid[i];
		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		while (!list_empty(&det->cb_list)) {
			cb_ctx = list_entry(det->cb_list.next,
					    struct mlx4_alias_guid_work_context,
					    list);
			sa_query = cb_ctx->sa_query;
			cb_ctx->sa_query = NULL;
			list_del(&cb_ctx->list);
			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
			wait_for_completion(&cb_ctx->done);
			kfree(cb_ctx);
			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		}
		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
	}
	for (i = 0 ; i < dev->num_ports; i++) {
		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
	}
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
}
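/*
 * Teardown ordering in mlx4_ib_destroy_alias_guid_service(): delayed work
 * is cancelled first; each in-flight SA query is then cancelled with its
 * sa_query pointer NULLed under ag_work_lock, steering the handler onto
 * the complete() path; only after all completions are reaped are the
 * per-port workqueues flushed and destroyed.
 */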
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
	char alias_wq_name[15];
	int ret = 0;
	int i, j, k;
	union ib_gid gid;

	if (!mlx4_is_master(dev->dev))
		return 0;
	dev->sriov.alias_guid.sa_client =
		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
	if (!dev->sriov.alias_guid.sa_client)
		return -ENOMEM;

	ib_sa_register_client(dev->sriov.alias_guid.sa_client);

	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

	for (i = 1; i <= dev->num_ports; ++i) {
		if (dev->ib_dev.query_gid(&dev->ib_dev, i, 0, &gid)) {
			ret = -EFAULT;
			goto err_unregister;
		}
	}

	for (i = 0 ; i < dev->num_ports; i++) {
		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
		/* Check if the SM doesn't need to assign the GUIDs */
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
			if (mlx4_ib_sm_guid_assign) {
				dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].
					ownership = MLX4_GUID_DRIVER_ASSIGN;
				continue;
			}
			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
				ownership = MLX4_GUID_NONE_ASSIGN;
			/* mark each val as if it was deleted,
			   till the sysAdmin gives it a valid val */
			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
			}
		}
		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
		/* prepare the records, set them to be allocated by sm */
		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
			invalidate_guid_record(dev, i + 1, j);

		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
		dev->sriov.alias_guid.ports_guid[i].port = i;
		if (mlx4_ib_sm_guid_assign)
			set_all_slaves_guids(dev, i);

		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		dev->sriov.alias_guid.ports_guid[i].wq =
			create_singlethread_workqueue(alias_wq_name);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
		}
		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
				  alias_guid_work);
	}
	return 0;

err_thread:
	for (--i; i >= 0; i--) {
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
	}

err_unregister:
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
	dev->sriov.alias_guid.sa_client = NULL;
	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
	return ret;
}