/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/***************************************************************/
/* This file supports the handling of the alias GUID feature.  */
/***************************************************************/
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <linux/delay.h>
#include "mlx4_ib.h"

/*
 * The driver keeps the current state of all GUIDs as they are in the HW.
 * Whenever an SMP MAD GUIDInfo record is received, the data is cached.
 */

struct mlx4_alias_guid_work_context {
        u8 port;
        struct mlx4_ib_dev     *dev;
        struct ib_sa_query     *sa_query;
        struct completion       done;
        int                     query_id;
        struct list_head        list;
        int                     block_num;
};

struct mlx4_next_alias_guid_work {
        u8 port;
        u8 block_num;
        struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};

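/*
 * Update the cached alias GUIDs for @port_num from the GUIDInfo record
 * in @p_data: only the entries flagged in the block's stored
 * guid_indexes mask are copied into the demux GUID cache.  Master only.
 */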
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
                                         u8 port_num, u8 *p_data)
{
        int i;
        u64 guid_indexes;
        int slave_id;
        int port_index = port_num - 1;

        if (!mlx4_is_master(dev->dev))
                return;

        guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
                                   ports_guid[port_num - 1].
                                   all_rec_per_port[block_num].guid_indexes);
        pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

        for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
                /* The location of the specific index starts at bit 4
                 * and runs through bit 11 */
                if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
                        slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
                        if (slave_id >= dev->dev->num_slaves) {
                                pr_debug("The last slave: %d\n", slave_id);
                                return;
                        }

                        /* cache the guid: */
                        memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
                               &p_data[i * GUID_REC_SIZE],
                               GUID_REC_SIZE);
                } else
                        pr_debug("Guid number: %d in block: %d"
                                 " was not updated\n", i, block_num);
        }
}

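/* Return the cached alias GUID for @port at @index, or all ones if the
 * index is out of range. */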
static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
{
        if (index >= NUM_ALIAS_GUID_PER_PORT) {
                pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
                return (__force __be64) -1;
        }
        return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
}

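/* Map a GUID index within a GUIDInfo record to its SA component mask
 * bit; GID0 corresponds to bit 4. */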
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
{
        return IB_SA_COMP_MASK(4 + index);
}

/*
 * Whenever a new GUID is set or unset (guid table change), create an event
 * and notify the relevant slave (the master should also be notified).
 * If the GUID value differs from the one in the cache, the slave is not
 * updated; in that case the update waits for smp_snoop or the port
 * management event to call this function again and update the slave.
 * block_num - the index of the block (16 blocks available)
 * port_num - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
                                          int block_num, u8 port_num,
                                          u8 *p_data)
{
        int i;
        u64 guid_indexes;
        int slave_id;
        enum slave_port_state new_state;
        enum slave_port_state prev_state;
        __be64 tmp_cur_ag, from_cache_ag;
        enum slave_port_gen_event gen_event;

        if (!mlx4_is_master(dev->dev))
                return;

        guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
                                   ports_guid[port_num - 1].
                                   all_rec_per_port[block_num].guid_indexes);
        pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

        /* calculate the slaves and notify them */
        for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
                /* the location of the specific index runs from bits 4..11 */
                if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
                        continue;

                slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
                if (slave_id >= dev->dev->num_slaves)
                        return;
                tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
                from_cache_ag = get_cached_alias_guid(dev, port_num,
                                        (NUM_ALIAS_GUID_IN_REC * block_num) + i);
                /*
                 * Check whether the GUID differs from the one in the cache;
                 * if it does, wait for smp_snoop or the port management
                 * change event to update the slave on its port state change.
                 */
                if (tmp_cur_ag != from_cache_ag)
                        continue;
                mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);

                /* two cases: valid GUID, or invalid GUID */

                if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /* valid GUID */
                        prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
                        new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
                                                                  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
                                                                  &gen_event);
                        pr_debug("slave: %d, port: %d prev_port_state: %d,"
                                 " new_port_state: %d, gen_event: %d\n",
                                 slave_id, port_num, prev_state, new_state, gen_event);
                        if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
                                pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
                                         slave_id, port_num);
                                mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
                                                               port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
                        }
                } else { /* request to invalidate GUID */
                        set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
                                                      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
                                                      &gen_event);
                        pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
                                 slave_id, port_num);
                        mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
                                                       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
                }
        }
}

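/*
 * Completion callback for the SA GUIDInfo query issued by set_guid_rec():
 * caches the records the SM returned, re-arms driver-assigned entries the
 * SM declined, notifies the slaves, then reschedules the alias GUID work
 * and releases (or completes) the callback context.
 */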
static void aliasguid_query_handler(int status,
                                    struct ib_sa_guidinfo_rec *guid_rec,
                                    void *context)
{
        struct mlx4_ib_dev *dev;
        struct mlx4_alias_guid_work_context *cb_ctx = context;
        u8 port_index;
        int i;
        struct mlx4_sriov_alias_guid_info_rec_det *rec;
        unsigned long flags, flags1;

        if (!context)
                return;

        dev = cb_ctx->dev;
        port_index = cb_ctx->port - 1;
        rec = &dev->sriov.alias_guid.ports_guid[port_index].
                all_rec_per_port[cb_ctx->block_num];

        if (status) {
                rec->status = MLX4_GUID_INFO_STATUS_IDLE;
                pr_debug("(port: %d) failed: status = %d\n",
                         cb_ctx->port, status);
                goto out;
        }

        if (guid_rec->block_num != cb_ctx->block_num) {
                pr_err("block num mismatch: %d != %d\n",
                       cb_ctx->block_num, guid_rec->block_num);
                goto out;
        }

        pr_debug("lid/port: %d/%d, block_num: %d\n",
                 be16_to_cpu(guid_rec->lid), cb_ctx->port,
                 guid_rec->block_num);

        rec = &dev->sriov.alias_guid.ports_guid[port_index].
                all_rec_per_port[guid_rec->block_num];

        rec->status = MLX4_GUID_INFO_STATUS_SET;
        rec->method = MLX4_GUID_INFO_RECORD_SET;

        for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
                __be64 tmp_cur_ag;
                tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
                /* Check whether the SM assigned this record; if it did not,
                 * and the record was driver-assigned (not a sysadmin
                 * request), ask the SM for a new GUID instead of the
                 * driver-requested one.
                 */
                if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
                        mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
                                     "block_num: %d was declined by SM, "
                                     "ownership by %d (0 = driver, 1=sysAdmin,"
                                     " 2=None)\n", __func__, i,
                                     guid_rec->block_num, rec->ownership);
                        if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
                                /* if it is driver-assigned, ask the SM for a new GUID */
                                *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
                                        MLX4_NOT_SET_GUID;

                                /* Mark the record as not assigned, and let it
                                 * be sent again in the next work schedule. */
                                rec->status = MLX4_GUID_INFO_STATUS_IDLE;
                                rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
                        }
                } else {
                        /* Properly assigned record: save the GUID we just got
                         * from the SM in admin_guid so that it is persistent;
                         * subsequent requests to the SM will ask for the same
                         * GUID. */
                        if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
                            tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
                                /* the sysadmin assignment failed */
                                mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
                                             " admin guid after SysAdmin "
                                             "configuration. "
                                             "Record num %d in block_num:%d "
                                             "was declined by SM, "
                                             "new val(0x%llx) was kept\n",
                                             __func__, i,
                                             guid_rec->block_num,
                                             (long long)be64_to_cpu(*(__be64 *) &
                                                         rec->all_recs[i * GUID_REC_SIZE]));
                        } else {
                                memcpy(&rec->all_recs[i * GUID_REC_SIZE],
                                       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
                                       GUID_REC_SIZE);
                        }
                }
        }
        /*
         * This call covers the case where the SM does not send an SMP;
         * the driver then notifies the slaves from the SA response.
         */
        mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
                                             cb_ctx->port,
                                             guid_rec->guid_info_list);
out:
        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        if (!dev->sriov.is_going_down)
                queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
                                   &dev->sriov.alias_guid.ports_guid[port_index].
                                   alias_guid_work, 0);
        if (cb_ctx->sa_query) {
                list_del(&cb_ctx->list);
                kfree(cb_ctx);
        } else
                complete(&cb_ctx->done);
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

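/*
 * Mark a GUID record IDLE and recompute its guid_indexes component mask
 * so it will be sent to the SM again; values marked for deletion, the HW
 * GUID (first GUID of the first record), and records the SM need not
 * assign are left out of the mask.
 */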
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
        int i;
        u64 cur_admin_val;
        ib_sa_comp_mask comp_mask = 0;

        dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
                = MLX4_GUID_INFO_STATUS_IDLE;
        dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
                = MLX4_GUID_INFO_RECORD_SET;

        /* calculate the comp_mask for that record */
        for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
                cur_admin_val =
                        *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
                        all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
                /*
                 * Check the admin value: if it is marked for deletion, or it
                 * is the first GUID of the first record (the HW GUID), or the
                 * record is not owned by the sysadmin and the SM does not
                 * need to assign GUIDs, then do not put it up for assignment.
                 */
                if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
                    (!index && !i) ||
                    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
                    ports_guid[port - 1].all_rec_per_port[index].ownership)
                        continue;
                comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
        }
        dev->sriov.alias_guid.ports_guid[port - 1].
                all_rec_per_port[index].guid_indexes = comp_mask;
}

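/*
 * Build an SA GUIDInfo record from @rec_det and send it to the SM via
 * ib_sa_guid_info_rec_query().  If the port is not active, allocation
 * fails, or the query cannot be issued, the record is invalidated and
 * the work is rescheduled.
 */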
static int set_guid_rec(struct ib_device *ibdev,
                        u8 port, int index,
                        struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
        int err;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct ib_sa_guidinfo_rec guid_info_rec;
        ib_sa_comp_mask comp_mask;
        struct ib_port_attr attr;
        struct mlx4_alias_guid_work_context *callback_context;
        unsigned long resched_delay, flags, flags1;
        struct list_head *head =
                &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

        err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
        if (err) {
                pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
                         err, port);
                return err;
        }
        /* check that the port was configured by the SM; otherwise there is no need to send */
        if (attr.state != IB_PORT_ACTIVE) {
                pr_debug("port %d not active...rescheduling\n", port);
                resched_delay = 5 * HZ;
                err = -EAGAIN;
                goto new_schedule;
        }

        callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
        if (!callback_context) {
                err = -ENOMEM;
                resched_delay = HZ * 5;
                goto new_schedule;
        }
        callback_context->port = port;
        callback_context->dev = dev;
        callback_context->block_num = index;

        memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

        guid_info_rec.lid = cpu_to_be16(attr.lid);
        guid_info_rec.block_num = index;

        memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
               GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
        comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
                rec_det->guid_indexes;

        init_completion(&callback_context->done);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        list_add_tail(&callback_context->list, head);
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

        callback_context->query_id =
                ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
                                          ibdev, port, &guid_info_rec,
                                          comp_mask, rec_det->method, 1000,
                                          GFP_KERNEL, aliasguid_query_handler,
                                          callback_context,
                                          &callback_context->sa_query);
        if (callback_context->query_id < 0) {
                pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
                         "%d. will reschedule to the next 1 sec.\n",
                         callback_context->query_id);
                spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
                list_del(&callback_context->list);
                kfree(callback_context);
                spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
                resched_delay = 1 * HZ;
                err = -EAGAIN;
                goto new_schedule;
        }
        err = 0;
        goto out;

new_schedule:
        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        invalidate_guid_record(dev, port, index);
        if (!dev->sriov.is_going_down) {
                queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
                                   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
                                   resched_delay);
        }
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
        return err;
}

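/*
 * Invalidate every GUID record of @port and kick the per-port work so
 * the whole table is re-sent to the SM.
 */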
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
        int i;
        unsigned long flags, flags1;

        pr_debug("port %d\n", port);

        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
                invalidate_guid_record(dev, port, i);

        if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
                /*
                 * Make sure no work waits in the queue.  If the work is
                 * already queued (not on the timer), the cancel will fail;
                 * that is not a problem because we just want the work
                 * started.
                 */
                cancel_delayed_work(&dev->sriov.alias_guid.
                                      ports_guid[port - 1].alias_guid_work);
                queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
                                   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
                                   0);
        }
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

/* Return the next record that has not been configured (or failed to be
 * configured), marking it PENDING under the work lock. */
static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
                                     struct mlx4_next_alias_guid_work *rec)
{
        int j;
        unsigned long flags;

        for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
                spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
                if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
                    MLX4_GUID_INFO_STATUS_IDLE) {
                        memcpy(&rec->rec_det,
                               &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
                               sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
                        rec->port = port;
                        rec->block_num = j;
                        dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
                                MLX4_GUID_INFO_STATUS_PENDING;
                        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
                        return 0;
                }
                spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
        }
        return -ENOENT;
}

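/* Store @rec_det (guid_indexes, all_recs and status) into the port's
 * record at @rec_index. */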
static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
                                             int rec_index,
                                             struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
{
        dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
                rec_det->guid_indexes;
        memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
               rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
        dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
                rec_det->status;
}

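/* Initialize all records of @port with zeroed GUIDs, IDLE status and a
 * full component mask (block 0 omits GID0, the HW GUID), so the whole
 * table is put up for assignment. */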
static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
{
        int j;
        struct mlx4_sriov_alias_guid_info_rec_det rec_det;

        for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
                memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
                rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
                        IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
                        IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
                        IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
                        IB_SA_GUIDINFO_REC_GID7;
                rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
                set_administratively_guid_record(dev, port, j, &rec_det);
        }
}

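/*
 * Delayed-work handler: fetch the next IDLE record of this port via
 * get_next_record_to_update() and send it to the SM through
 * set_guid_rec().
 */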
static void alias_guid_work(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        int ret = 0;
        struct mlx4_next_alias_guid_work *rec;
        struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
                container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
                             alias_guid_work);
        struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
        struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
                                                struct mlx4_ib_sriov,
                                                alias_guid);
        struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);

        rec = kzalloc(sizeof *rec, GFP_KERNEL);
        if (!rec) {
                pr_err("alias_guid_work: No Memory\n");
                return;
        }

        pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
        ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
        if (ret) {
                pr_debug("No more records to update.\n");
                goto out;
        }

        set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
                     &rec->rec_det);

out:
        kfree(rec);
}

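/* Queue the alias GUID work of @port immediately; master only, and only
 * while the device is not going down. */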
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
        unsigned long flags, flags1;

        if (!mlx4_is_master(dev->dev))
                return;
        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
        if (!dev->sriov.is_going_down) {
                queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
                           &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
        }
        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

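/*
 * Tear down the alias GUID service: cancel the per-port work, cancel any
 * in-flight SA queries and wait for their completion handlers, then
 * destroy the workqueues and unregister the SA client.
 */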
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
        int i;
        struct mlx4_ib_sriov *sriov = &dev->sriov;
        struct mlx4_alias_guid_work_context *cb_ctx;
        struct mlx4_sriov_alias_guid_port_rec_det *det;
        struct ib_sa_query *sa_query;
        unsigned long flags;

        for (i = 0; i < dev->num_ports; i++) {
                cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
                det = &sriov->alias_guid.ports_guid[i];
                spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
                while (!list_empty(&det->cb_list)) {
                        cb_ctx = list_entry(det->cb_list.next,
                                            struct mlx4_alias_guid_work_context,
                                            list);
                        sa_query = cb_ctx->sa_query;
                        cb_ctx->sa_query = NULL;
                        list_del(&cb_ctx->list);
                        spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
                        ib_sa_cancel_query(cb_ctx->query_id, sa_query);
                        wait_for_completion(&cb_ctx->done);
                        kfree(cb_ctx);
                        spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
                }
                spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
        }
        for (i = 0; i < dev->num_ports; i++) {
                flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
                destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
        }
        ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
        kfree(dev->sriov.alias_guid.sa_client);
}

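/*
 * Set up the alias GUID service on the master: register an SA client,
 * initialize the per-port records and their ownership, invalidate all
 * records so they are (re)queried from the SM, and create a
 * single-threaded workqueue per port for the alias GUID work.
 */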
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
        char alias_wq_name[15];
        int ret = 0;
        int i, j, k;
        union ib_gid gid;

        if (!mlx4_is_master(dev->dev))
                return 0;
        dev->sriov.alias_guid.sa_client =
                kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
        if (!dev->sriov.alias_guid.sa_client)
                return -ENOMEM;

        ib_sa_register_client(dev->sriov.alias_guid.sa_client);

        spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

        for (i = 1; i <= dev->num_ports; ++i) {
                if (dev->ib_dev.query_gid(&dev->ib_dev, i, 0, &gid)) {
                        ret = -EFAULT;
                        goto err_unregister;
                }
        }

        for (i = 0; i < dev->num_ports; i++) {
                memset(&dev->sriov.alias_guid.ports_guid[i], 0,
                       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
                /* set record ownership according to mlx4_ib_sm_guid_assign */
                for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
                        if (mlx4_ib_sm_guid_assign) {
                                dev->sriov.alias_guid.ports_guid[i].
                                        all_rec_per_port[j].
                                        ownership = MLX4_GUID_DRIVER_ASSIGN;
                                continue;
                        }
                        dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
                                        ownership = MLX4_GUID_NONE_ASSIGN;
                        /* mark each value as deleted until the sysadmin
                         * provides a valid value */
                        for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
                                *(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
                                        all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
                                                cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
                        }
                }
                INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
                /* prepare the records; set them to be allocated by the SM */
                for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
                        invalidate_guid_record(dev, i + 1, j);

                dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
                dev->sriov.alias_guid.ports_guid[i].port = i;
                if (mlx4_ib_sm_guid_assign)
                        set_all_slaves_guids(dev, i);

                snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
                dev->sriov.alias_guid.ports_guid[i].wq =
                        create_singlethread_workqueue(alias_wq_name);
                if (!dev->sriov.alias_guid.ports_guid[i].wq) {
                        ret = -ENOMEM;
                        goto err_thread;
                }
                INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
                          alias_guid_work);
        }
        return 0;

err_thread:
        for (--i; i >= 0; i--) {
                destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
                dev->sriov.alias_guid.ports_guid[i].wq = NULL;
        }

err_unregister:
        ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
        kfree(dev->sriov.alias_guid.sa_client);
        dev->sriov.alias_guid.sa_client = NULL;
        pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
        return ret;
}