1 /*
2  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <linux/module.h>
35
36 #ifdef __linux__
37 #include <linux/proc_fs.h>
38 #endif
39
40 #include <linux/slab.h>
41 #include <linux/errno.h>
42 #include <linux/netdevice.h>
43 #include <linux/inetdevice.h>
44 #include <linux/if_vlan.h>
45 #include <linux/bitops.h>
46 #include <linux/if_ether.h>
47 #include <linux/fs.h>
48
49 #include <rdma/ib_smi.h>
50 #include <rdma/ib_user_verbs.h>
51 #include <rdma/ib_addr.h>
52
53 #include <linux/mlx4/driver.h>
54 #include <linux/mlx4/cmd.h>
55 #include <linux/sched.h>
56 #include "mlx4_ib.h"
57 #include "user.h"
58 #include "wc.h"
59
60 #define DRV_NAME        MLX4_IB_DRV_NAME
61 #define DRV_VERSION     "1.0"
62 #define DRV_RELDATE     "April 4, 2008"
63
64 #define MLX4_IB_DRIVER_PROC_DIR_NAME "driver/mlx4_ib"
65 #define MLX4_IB_MRS_PROC_DIR_NAME "mrs"
66
67 MODULE_AUTHOR("Roland Dreier");
68 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
69 MODULE_LICENSE("Dual BSD/GPL");
70 MODULE_VERSION(DRV_VERSION);
71
72 int mlx4_ib_sm_guid_assign = 1;
73
74 #ifdef __linux__
75 struct proc_dir_entry *mlx4_mrs_dir_entry;
76 static struct proc_dir_entry *mlx4_ib_driver_dir_entry;
77 #endif
78
79 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
80 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
81
82 static char dev_assign_str[512];
83 //module_param_string(dev_assign_str, dev_assign_str, sizeof(dev_assign_str), 0644);
84 MODULE_PARM_DESC(dev_assign_str, "Map all device function numbers to "
85                  "IB device numbers following the pattern: "
86                  "bb:dd.f-0,bb:dd.f-1,... (all numbers are hexadecimal)."
87                  " Max supported devices - 32");
88
89 static const char mlx4_ib_version[] =
90         DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
91         DRV_VERSION " (" DRV_RELDATE ")\n";
92
93 struct update_gid_work {
94         struct work_struct      work;
95         union ib_gid            gids[128];
96         struct mlx4_ib_dev     *dev;
97         int                     port;
98 };
99
100 struct dev_rec {
101         int     bus;
102         int     dev;
103         int     func;
104         int     nr;
105 };
106
107 #define MAX_DR 32
108 static struct dev_rec dr[MAX_DR];
109
110 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
111
112 static struct workqueue_struct *wq;
113
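/*
 * Set up an SMP for a MAD_IFC query: LID-routed subnet management class,
 * method Get, base/class version 1.  Callers fill in attr_id/attr_mod.
 */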
114 static void init_query_mad(struct ib_smp *mad)
115 {
116         mad->base_version  = 1;
117         mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
118         mad->class_version = 1;
119         mad->method        = IB_MGMT_METHOD_GET;
120 }
121
122 static union ib_gid zgid;
123
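/*
 * Fill ib_device_attr from a NodeInfo MAD query combined with the cached
 * device capabilities and resource quotas.
 */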
124 static int mlx4_ib_query_device(struct ib_device *ibdev,
125                                 struct ib_device_attr *props)
126 {
127         struct mlx4_ib_dev *dev = to_mdev(ibdev);
128         struct ib_smp *in_mad  = NULL;
129         struct ib_smp *out_mad = NULL;
130         int err = -ENOMEM;
131
132         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
133         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
134         if (!in_mad || !out_mad)
135                 goto out;
136
137         init_query_mad(in_mad);
138         in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
139
140         err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
141                            1, NULL, NULL, in_mad, out_mad);
142         if (err)
143                 goto out;
144
145         memset(props, 0, sizeof *props);
146
147         props->fw_ver = dev->dev->caps.fw_ver;
148         props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
149                 IB_DEVICE_PORT_ACTIVE_EVENT             |
150                 IB_DEVICE_SYS_IMAGE_GUID                |
151                 IB_DEVICE_RC_RNR_NAK_GEN                |
152                 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      |
153                 IB_DEVICE_SHARED_MR;
154
155         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
156                 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
157         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
158                 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
159         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
160                 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
161         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
162                 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
163         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
164                 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
165         if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
166                 props->device_cap_flags |= IB_DEVICE_UD_TSO;
167         if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
168                 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
169         if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
170             (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
171             (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
172                 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
173         if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
174                 props->device_cap_flags |= IB_DEVICE_XRC;
175
176         props->device_cap_flags |= IB_DEVICE_QPG;
177         if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
178                 props->device_cap_flags |= IB_DEVICE_UD_RSS;
179                 props->max_rss_tbl_sz = dev->dev->caps.max_rss_tbl_sz;
180         }
181         props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
182                 0xffffff;
183         props->vendor_part_id      = dev->dev->pdev->device;
184         props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
185         memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
186
187         props->max_mr_size         = ~0ull;
188         props->page_size_cap       = dev->dev->caps.page_size_cap;
189         props->max_qp              = dev->dev->quotas.qp;
190         props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
191         props->max_sge             = min(dev->dev->caps.max_sq_sg,
192                                          dev->dev->caps.max_rq_sg);
193         props->max_cq              = dev->dev->quotas.cq;
194         props->max_cqe             = dev->dev->caps.max_cqes;
195         props->max_mr              = dev->dev->quotas.mpt;
196         props->max_pd              = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
197         props->max_qp_rd_atom      = dev->dev->caps.max_qp_dest_rdma;
198         props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
199         props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
200         props->max_srq             = dev->dev->quotas.srq;
201         props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
202         props->max_srq_sge         = dev->dev->caps.max_srq_sge;
203         props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
204         props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
205         props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
206                 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
207         props->masked_atomic_cap   = props->atomic_cap;
208         props->max_pkeys           = dev->dev->caps.pkey_table_len[1];
209         props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
210         props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
211         props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
212                                            props->max_mcast_grp;
213         props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
214
215 out:
216         kfree(in_mad);
217         kfree(out_mad);
218
219         return err;
220 }
221
222 static enum rdma_link_layer
223 mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
224 {
225         struct mlx4_dev *dev = to_mdev(device)->dev;
226
227         return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
228                 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
229 }
230
231 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
232                               struct ib_port_attr *props, int netw_view)
233 {
234         struct ib_smp *in_mad  = NULL;
235         struct ib_smp *out_mad = NULL;
236         int ext_active_speed;
237         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
238         int err = -ENOMEM;
239
240         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
241         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
242         if (!in_mad || !out_mad)
243                 goto out;
244
245         init_query_mad(in_mad);
246         in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
247         in_mad->attr_mod = cpu_to_be32(port);
248
249         if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
250                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
251
252         err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
253                                 in_mad, out_mad);
254         if (err)
255                 goto out;
256
257
258         props->lid              = be16_to_cpup((__be16 *) (out_mad->data + 16));
259         props->lmc              = out_mad->data[34] & 0x7;
260         props->sm_lid           = be16_to_cpup((__be16 *) (out_mad->data + 18));
261         props->sm_sl            = out_mad->data[36] & 0xf;
262         props->state            = out_mad->data[32] & 0xf;
263         props->phys_state       = out_mad->data[33] >> 4;
264         props->port_cap_flags   = be32_to_cpup((__be32 *) (out_mad->data + 20));
265         if (netw_view)
266                 props->gid_tbl_len = out_mad->data[50];
267         else
268                 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
269         props->max_msg_sz       = to_mdev(ibdev)->dev->caps.max_msg_sz;
270         props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
271         props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
272         props->qkey_viol_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 48));
273         props->active_width     = out_mad->data[31] & 0xf;
274         props->active_speed     = out_mad->data[35] >> 4;
275         props->max_mtu          = out_mad->data[41] & 0xf;
276         props->active_mtu       = out_mad->data[36] >> 4;
277         props->subnet_timeout   = out_mad->data[51] & 0x1f;
278         props->max_vl_num       = out_mad->data[37] >> 4;
279         props->init_type_reply  = out_mad->data[41] >> 4;
280
281         /* Check if extended speeds (EDR/FDR/...) are supported */
282         if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
283                 ext_active_speed = out_mad->data[62] >> 4;
284
285                 switch (ext_active_speed) {
286                 case 1:
287                         props->active_speed = IB_SPEED_FDR;
288                         break;
289                 case 2:
290                         props->active_speed = IB_SPEED_EDR;
291                         break;
292                 }
293         }
294
295         /* If the reported active speed is QDR, check whether it is actually FDR-10 */
296         if (props->active_speed == IB_SPEED_QDR) {
297                 init_query_mad(in_mad);
298                 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
299                 in_mad->attr_mod = cpu_to_be32(port);
300
301                 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
302                                    NULL, NULL, in_mad, out_mad);
303                 if (err)
304                         goto out;
305
306                 /* Checking LinkSpeedActive for FDR-10 */
307                 if (out_mad->data[15] & 0x1)
308                         props->active_speed = IB_SPEED_FDR10;
309         }
310
311         /* Avoid wrong speed value returned by FW if the IB link is down. */
312         if (props->state == IB_PORT_DOWN)
313                  props->active_speed = IB_SPEED_SDR;
314
315 out:
316         kfree(in_mad);
317         kfree(out_mad);
318         return err;
319 }
320
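/*
 * Map the logical port state to an IB PortPhysicalState value for the
 * Ethernet (RoCE) ports: 5 (LinkUp) when active, 3 (Disabled) otherwise.
 */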
321 static u8 state_to_phys_state(enum ib_port_state state)
322 {
323         return state == IB_PORT_ACTIVE ? 5 : 3;
324 }
325
326 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
327                                struct ib_port_attr *props, int netw_view)
328 {
329
330         struct mlx4_ib_dev *mdev = to_mdev(ibdev);
331         struct mlx4_ib_iboe *iboe = &mdev->iboe;
332         struct net_device *ndev;
333         enum ib_mtu tmp;
334         struct mlx4_cmd_mailbox *mailbox;
335         int err = 0;
336
337         mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
338         if (IS_ERR(mailbox))
339                 return PTR_ERR(mailbox);
340
341         err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
342                            MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
343                            MLX4_CMD_WRAPPED);
344         if (err)
345                 goto out;
346
347         props->active_width     =  (((u8 *)mailbox->buf)[5] == 0x40) ?
348                                                 IB_WIDTH_4X : IB_WIDTH_1X;
349         props->active_speed     = IB_SPEED_QDR;
350         props->port_cap_flags   = IB_PORT_CM_SUP;
351         if (netw_view)
352                 props->gid_tbl_len = MLX4_ROCE_MAX_GIDS;
353         else
354                 props->gid_tbl_len   = mdev->dev->caps.gid_table_len[port];
355
356         props->max_msg_sz       = mdev->dev->caps.max_msg_sz;
357         props->pkey_tbl_len     = 1;
358         props->max_mtu          = IB_MTU_4096;
359         props->max_vl_num       = 2;
360         props->state            = IB_PORT_DOWN;
361         props->phys_state       = state_to_phys_state(props->state);
362         props->active_mtu       = IB_MTU_256;
363         spin_lock(&iboe->lock);
364         ndev = iboe->netdevs[port - 1];
365         if (!ndev)
366                 goto out_unlock;
367
368         tmp = iboe_get_mtu(ndev->if_mtu);
369         props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
370
371         props->state            = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
372                                         IB_PORT_ACTIVE : IB_PORT_DOWN;
373         props->phys_state       = state_to_phys_state(props->state);
374 out_unlock:
375         spin_unlock(&iboe->lock);
376 out:
377         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
378         return err;
379 }
380
381 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
382                          struct ib_port_attr *props, int netw_view)
383 {
384         int err;
385
386         memset(props, 0, sizeof *props);
387
388         err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
389                 ib_link_query_port(ibdev, port, props, netw_view) :
390                                 eth_link_query_port(ibdev, port, props, netw_view);
391
392         return err;
393 }
394
395 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
396                               struct ib_port_attr *props)
397 {
398         /* returns host view */
399         return __mlx4_ib_query_port(ibdev, port, props, 0);
400 }
401
402 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
403                         union ib_gid *gid, int netw_view)
404 {
405         struct ib_smp *in_mad  = NULL;
406         struct ib_smp *out_mad = NULL;
407         int err = -ENOMEM;
408         struct mlx4_ib_dev *dev = to_mdev(ibdev);
409         int clear = 0;
410         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
411
412         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
413         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
414         if (!in_mad || !out_mad)
415                 goto out;
416
417         init_query_mad(in_mad);
418         in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
419         in_mad->attr_mod = cpu_to_be32(port);
420
421         if (mlx4_is_mfunc(dev->dev) && netw_view)
422                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
423
424         err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
425         if (err)
426                 goto out;
427
428         memcpy(gid->raw, out_mad->data + 8, 8);
429
430         if (mlx4_is_mfunc(dev->dev) && !netw_view) {
431                 if (index) {
432                         /* For any index > 0, return the null guid */
433                         err = 0;
434                         clear = 1;
435                         goto out;
436                 }
437         }
438
439         init_query_mad(in_mad);
440         in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
441         in_mad->attr_mod = cpu_to_be32(index / 8);
442
443         err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
444                            NULL, NULL, in_mad, out_mad);
445         if (err)
446                 goto out;
447
448         memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
449
450 out:
451         if (clear)
452                 memset(gid->raw + 8, 0, 8);
453         kfree(in_mad);
454         kfree(out_mad);
455         return err;
456 }
457
458 static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
459                           union ib_gid *gid)
460 {
461         struct mlx4_ib_dev *dev = to_mdev(ibdev);
462
463         *gid = dev->iboe.gid_table[port - 1][index];
464
465         return 0;
466 }
467
468 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
469                              union ib_gid *gid)
470 {
471         if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
472                 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
473         else
474                 return iboe_query_gid(ibdev, port, index, gid);
475 }
476
477 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
478                          u16 *pkey, int netw_view)
479 {
480         struct ib_smp *in_mad  = NULL;
481         struct ib_smp *out_mad = NULL;
482         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
483         int err = -ENOMEM;
484
485         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
486         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
487         if (!in_mad || !out_mad)
488                 goto out;
489
490         init_query_mad(in_mad);
491         in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
492         in_mad->attr_mod = cpu_to_be32(index / 32);
493
494         if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
495                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
496
497         err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
498                            in_mad, out_mad);
499         if (err)
500                 goto out;
501
502         *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
503
504 out:
505         kfree(in_mad);
506         kfree(out_mad);
507         return err;
508 }
509
510 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
511 {
512         return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
513 }
514
515 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
516                                  struct ib_device_modify *props)
517 {
518         struct mlx4_cmd_mailbox *mailbox;
519         unsigned long flags;
520
521         if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
522                 return -EOPNOTSUPP;
523
524         if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
525                 return 0;
526
527         if (mlx4_is_slave(to_mdev(ibdev)->dev))
528                 return -EOPNOTSUPP;
529
530         spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
531         memcpy(ibdev->node_desc, props->node_desc, 64);
532         spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
533
534         /*
535          * If possible, pass the node description to the FW so that it can
536          * generate a trap 144 (node desc changed).  If the cmd fails, just ignore.
537          */
538         mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
539         if (IS_ERR(mailbox))
540                 return 0;
541
542         memset(mailbox->buf, 0, 256);
543         memcpy(mailbox->buf, props->node_desc, 64);
544         mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
545                  MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
546
547         mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
548
549         return 0;
550 }
551
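/*
 * Issue SET_PORT to update the port capability mask and optionally reset
 * the QKey violation counter; the mailbox layout depends on whether the
 * device uses the old-style port commands.
 */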
552 static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
553                          u32 cap_mask)
554 {
555         struct mlx4_cmd_mailbox *mailbox;
556         int err;
557         u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
558
559         mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
560         if (IS_ERR(mailbox))
561                 return PTR_ERR(mailbox);
562
563         memset(mailbox->buf, 0, 256);
564
565         if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
566                 *(u8 *) mailbox->buf         = !!reset_qkey_viols << 6;
567                 ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
568         } else {
569                 ((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
570                 ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
571         }
572
573         err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
574                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
575
576         mlx4_free_cmd_mailbox(dev->dev, mailbox);
577         return err;
578 }
579
580 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
581                                struct ib_port_modify *props)
582 {
583         struct ib_port_attr attr;
584         u32 cap_mask;
585         int err;
586
587         mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
588
589         err = mlx4_ib_query_port(ibdev, port, &attr);
590         if (err)
591                 goto out;
592
593         cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
594                 ~props->clr_port_cap_mask;
595
596         err = mlx4_SET_PORT(to_mdev(ibdev), port,
597                             !!(mask & IB_PORT_RESET_QKEY_CNTR),
598                             cap_mask);
599
600 out:
601         mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
602         return err;
603 }
604
605 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
606                                                   struct ib_udata *udata)
607 {
608         struct mlx4_ib_dev *dev = to_mdev(ibdev);
609         struct mlx4_ib_ucontext *context;
610         struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
611         struct mlx4_ib_alloc_ucontext_resp resp;
612         int err;
613
614         if (!dev->ib_active)
615                 return ERR_PTR(-EAGAIN);
616
617         if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
618                 resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
619                 if (mlx4_wc_enabled()) {
620                         resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
621                         resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
622                 } else {
623                         resp_v3.bf_reg_size      = 0;
624                         resp_v3.bf_regs_per_page = 0;
625                 }
626         } else {
627                 resp.dev_caps         = dev->dev->caps.userspace_caps;
628                 resp.qp_tab_size      = dev->dev->caps.num_qps;
629                 if (mlx4_wc_enabled()) {
630                         resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
631                         resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
632                 } else {
633                         resp.bf_reg_size      = 0;
634                         resp.bf_regs_per_page = 0;
635                 }
636                 resp.cqe_size         = dev->dev->caps.cqe_size;
637         }
638
639         context = kmalloc(sizeof *context, GFP_KERNEL);
640         if (!context)
641                 return ERR_PTR(-ENOMEM);
642
643         err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
644         if (err) {
645                 kfree(context);
646                 return ERR_PTR(err);
647         }
648
649         INIT_LIST_HEAD(&context->db_page_list);
650         mutex_init(&context->db_page_mutex);
651
652         if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
653                 err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
654         else
655                 err = ib_copy_to_udata(udata, &resp, sizeof(resp));
656
657         if (err) {
658                 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
659                 kfree(context);
660                 return ERR_PTR(-EFAULT);
661         }
662
663         return &context->ibucontext;
664 }
665
666 static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
667 {
668         struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
669
670         mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
671         kfree(context);
672
673         return 0;
674 }
675 #ifdef __linux__
676 static unsigned long mlx4_ib_get_unmapped_area(struct file *file,
677                         unsigned long addr,
678                         unsigned long len, unsigned long pgoff,
679                         unsigned long flags)
680 {
681         struct mm_struct *mm;
682         struct vm_area_struct *vma;
683         unsigned long start_addr;
684         unsigned long page_size_order;
685         unsigned long  command;
686
687         mm = current->mm;
688         if (addr)
689                 return current->mm->get_unmapped_area(file, addr, len,
690                                                 pgoff, flags);
691
692         /* The low 8 bits of pgoff hold the command; the remaining bits are data for that command */
693         command = pgoff & MLX4_IB_MMAP_CMD_MASK;
694         if (command != MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES)
695                 return current->mm->get_unmapped_area(file, addr, len,
696                                                 pgoff, flags);
697
698         page_size_order = pgoff >> MLX4_IB_MMAP_CMD_BITS;
699         /* code is based on the huge-pages get_unmapped_area code */
700         start_addr = mm->free_area_cache;
701
702         if (len <= mm->cached_hole_size)
703                 start_addr = TASK_UNMAPPED_BASE;
704
705
706 full_search:
707         addr = ALIGN(start_addr, 1 << page_size_order);
708
709         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
710                 /* At this point:  (!vma || addr < vma->vm_end). */
711                 if (TASK_SIZE - len < addr) {
712                         /*
713                          * Start a new search - just in case we missed
714                          * some holes.
715                          */
716                         if (start_addr != TASK_UNMAPPED_BASE) {
717                                 start_addr = TASK_UNMAPPED_BASE;
718                                 goto full_search;
719                         }
720                         return -ENOMEM;
721                 }
722
723                 if (!vma || addr + len <= vma->vm_start)
724                         return addr;
725                 addr = ALIGN(vma->vm_end, 1 << page_size_order);
726         }
727 }
728 #endif
729
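/*
 * mmap handler: depending on the command encoded in the low bits of the
 * offset, map the UAR doorbell page, the blue flame page (write-combining),
 * or a newly allocated set of physically contiguous pages.
 */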
730 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
731 {
732         struct mlx4_ib_dev *dev = to_mdev(context->device);
733         int err;
734
735         /* The low 8 bits of the offset hold the command; the remaining bits are data for that command */
736         unsigned long  command = vma->vm_pgoff & MLX4_IB_MMAP_CMD_MASK;
737
738         if (command < MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
739                 /* compatibility handling for commands 0 and 1 */
740                 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
741                         return -EINVAL;
742         }
743         if (command == MLX4_IB_MMAP_UAR_PAGE) {
744                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
745
746                 if (io_remap_pfn_range(vma, vma->vm_start,
747                                        to_mucontext(context)->uar.pfn,
748                                        PAGE_SIZE, vma->vm_page_prot))
749                         return -EAGAIN;
750         } else if (command == MLX4_IB_MMAP_BLUE_FLAME_PAGE &&
751                         dev->dev->caps.bf_reg_size != 0) {
752                 vma->vm_page_prot = pgprot_wc(vma->vm_page_prot);
753
754                 if (io_remap_pfn_range(vma, vma->vm_start,
755                                        to_mucontext(context)->uar.pfn +
756                                        dev->dev->caps.num_uars,
757                                        PAGE_SIZE, vma->vm_page_prot))
758                         return -EAGAIN;
759         } else if (command == MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
760                 /* Getting contiguous physical pages */
761                 unsigned long total_size = vma->vm_end - vma->vm_start;
762                 unsigned long page_size_order = (vma->vm_pgoff) >>
763                                                 MLX4_IB_MMAP_CMD_BITS;
764                 struct ib_cmem *ib_cmem;
765                 ib_cmem = ib_cmem_alloc_contiguous_pages(context, total_size,
766                                                         page_size_order);
767                 if (IS_ERR(ib_cmem)) {
768                         err = PTR_ERR(ib_cmem);
769                         return err;
770                 }
771
772                 err = ib_cmem_map_contiguous_pages_to_vma(ib_cmem, vma);
773                 if (err) {
774                         ib_cmem_release_contiguous_pages(ib_cmem);
775                         return err;
776                 }
777                 return 0;
778         } else
779                 return -EINVAL;
780
781         return 0;
782 }
783
784 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
785                                       struct ib_ucontext *context,
786                                       struct ib_udata *udata)
787 {
788         struct mlx4_ib_pd *pd;
789         int err;
790
791         pd = kmalloc(sizeof *pd, GFP_KERNEL);
792         if (!pd)
793                 return ERR_PTR(-ENOMEM);
794
795         err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
796         if (err) {
797                 kfree(pd);
798                 return ERR_PTR(err);
799         }
800
801         if (context)
802                 if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
803                         mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
804                         kfree(pd);
805                         return ERR_PTR(-EFAULT);
806                 }
807
808         return &pd->ibpd;
809 }
810
811 static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
812 {
813         mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
814         kfree(pd);
815
816         return 0;
817 }
818
819 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
820                                           struct ib_ucontext *context,
821                                           struct ib_udata *udata)
822 {
823         struct mlx4_ib_xrcd *xrcd;
824         int err;
825
826         if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
827                 return ERR_PTR(-ENOSYS);
828
829         xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
830         if (!xrcd)
831                 return ERR_PTR(-ENOMEM);
832
833         err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
834         if (err)
835                 goto err1;
836
837         xrcd->pd = ib_alloc_pd(ibdev);
838         if (IS_ERR(xrcd->pd)) {
839                 err = PTR_ERR(xrcd->pd);
840                 goto err2;
841         }
842
843         xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
844         if (IS_ERR(xrcd->cq)) {
845                 err = PTR_ERR(xrcd->cq);
846                 goto err3;
847         }
848
849         return &xrcd->ibxrcd;
850
851 err3:
852         ib_dealloc_pd(xrcd->pd);
853 err2:
854         mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
855 err1:
856         kfree(xrcd);
857         return ERR_PTR(err);
858 }
859
860 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
861 {
862         ib_destroy_cq(to_mxrcd(xrcd)->cq);
863         ib_dealloc_pd(to_mxrcd(xrcd)->pd);
864         mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
865         kfree(xrcd);
866
867         return 0;
868 }
869
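/*
 * Track a multicast GID on the QP's gid_list; if mlx4_ib_add_mc() programmed
 * the corresponding MAC into the netdev, remember the port so the detach
 * path can remove it later.
 */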
870 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
871 {
872         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
873         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
874         struct mlx4_ib_gid_entry *ge;
875
876         ge = kzalloc(sizeof *ge, GFP_KERNEL);
877         if (!ge)
878                 return -ENOMEM;
879
880         ge->gid = *gid;
881         if (mlx4_ib_add_mc(mdev, mqp, gid)) {
882                 ge->port = mqp->port;
883                 ge->added = 1;
884         }
885
886         mutex_lock(&mqp->mutex);
887         list_add_tail(&ge->list, &mqp->gid_list);
888         mutex_unlock(&mqp->mutex);
889
890         return 0;
891 }
892
893 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
894                    union ib_gid *gid)
895 {
896         u8 mac[6];
897         struct net_device *ndev;
898         int ret = 0;
899
900         if (!mqp->port)
901                 return 0;
902
903         spin_lock(&mdev->iboe.lock);
904         ndev = mdev->iboe.netdevs[mqp->port - 1];
905         if (ndev)
906                 dev_hold(ndev);
907         spin_unlock(&mdev->iboe.lock);
908
909         if (ndev) {
910                 rdma_get_mcast_mac((struct in6_addr *)gid, mac);
911                 rtnl_lock();
912                 dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac, 6, 0);
913                 ret = 1;
914                 rtnl_unlock();
915                 dev_put(ndev);
916         }
917
918         return ret;
919 }
920
921 struct mlx4_ib_steering {
922         struct list_head list;
923         u64 reg_id;
924         union ib_gid gid;
925 };
926
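/*
 * Attach the QP to a multicast group.  With device-managed flow steering
 * the returned reg_id is kept on mqp->steering_rules so the detach path
 * can find and remove the rule.
 */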
927 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
928 {
929         int err;
930         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
931         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
932         u64 reg_id;
933         struct mlx4_ib_steering *ib_steering = NULL;
934
935         if (mdev->dev->caps.steering_mode ==
936             MLX4_STEERING_MODE_DEVICE_MANAGED) {
937                 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
938                 if (!ib_steering)
939                         return -ENOMEM;
940         }
941
942         err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
943                                     !!(mqp->flags &
944                                        MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
945                                     MLX4_PROT_IB_IPV6, &reg_id);
946         if (err)
947                 goto err_malloc;
948
949         err = add_gid_entry(ibqp, gid);
950         if (err)
951                 goto err_add;
952
953         if (ib_steering) {
954                 memcpy(ib_steering->gid.raw, gid->raw, 16);
955                 ib_steering->reg_id = reg_id;
956                 mutex_lock(&mqp->mutex);
957                 list_add(&ib_steering->list, &mqp->steering_rules);
958                 mutex_unlock(&mqp->mutex);
959         }
960         return 0;
961
962 err_add:
963         mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
964                               MLX4_PROT_IB_IPV6, reg_id);
965 err_malloc:
966         kfree(ib_steering);
967
968         return err;
969 }
970
971 enum {
972         IBV_FLOW_L4_NONE = 0,
973         IBV_FLOW_L4_OTHER = 3,
974         IBV_FLOW_L4_UDP = 5,
975         IBV_FLOW_L4_TCP = 6
976 };
977
978 struct mlx4_cm_steering {
979         struct list_head list;
980         u64 reg_id;
981         struct ib_flow_spec spec;
982 };
983
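/*
 * Translate an ib_flow_spec into the list of mlx4_spec_list entries
 * (L2, optional L3/IPv4 and L4/TCP-UDP) consumed by mlx4_flow_attach().
 */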
984 static int flow_spec_to_net_rule(struct ib_device *dev, struct ib_flow_spec *flow_spec,
985                                   struct list_head *rule_list_h)
986 {
987         struct mlx4_spec_list *spec_l2, *spec_l3, *spec_l4;
988         u64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
989
990         spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
991         if (!spec_l2)
992                 return -ENOMEM;
993
994         switch (flow_spec->type) {
995         case IB_FLOW_ETH:
996                 spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
997                 memcpy(spec_l2->eth.dst_mac, flow_spec->l2_id.eth.mac, ETH_ALEN);
998                 memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
999                 spec_l2->eth.ether_type = flow_spec->l2_id.eth.ethertype;
1000                 if (flow_spec->l2_id.eth.vlan_present) {
1001                         spec_l2->eth.vlan_id = flow_spec->l2_id.eth.vlan;
1002                         spec_l2->eth.vlan_id_msk = cpu_to_be16(0x0fff);
1003                 }
1004                 break;
1005         case IB_FLOW_IB_UC:
1006                 spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
1007                 if(flow_spec->l2_id.ib_uc.qpn) {
1008                         spec_l2->ib.l3_qpn = cpu_to_be32(flow_spec->l2_id.ib_uc.qpn);
1009                         spec_l2->ib.qpn_msk = cpu_to_be32(0xffffff);
1010                     }
1011                 break;
1012         case IB_FLOW_IB_MC_IPV4:
1013         case IB_FLOW_IB_MC_IPV6:
1014                 spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
1015                 memcpy(spec_l2->ib.dst_gid, flow_spec->l2_id.ib_mc.mgid, 16);
1016                 memset(spec_l2->ib.dst_gid_msk, 0xff, 16);
1017                 break;
1018         }
1019
1020
1021         list_add_tail(&spec_l2->list, rule_list_h);
1022
1023         if (flow_spec->l2_id.eth.ethertype == cpu_to_be16(ETH_P_IP) ||
1024             flow_spec->type != IB_FLOW_ETH) {
1025                 spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
1026                 if (!spec_l3)
1027                         return -ENOMEM;
1028
1029                 spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
1030                 spec_l3->ipv4.src_ip = flow_spec->src_ip;
1031                 if (flow_spec->type != IB_FLOW_IB_MC_IPV4 &&
1032                     flow_spec->type != IB_FLOW_IB_MC_IPV6)
1033                         spec_l3->ipv4.dst_ip = flow_spec->dst_ip;
1034
1035                 if (spec_l3->ipv4.src_ip)
1036                         spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
1037                 if (spec_l3->ipv4.dst_ip)
1038                         spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;
1039
1040                 list_add_tail(&spec_l3->list, rule_list_h);
1041         }
1042
1043         if (flow_spec->l4_protocol) {
1044                 spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
1045                 if (!spec_l4)
1046                         return -ENOMEM;
1047
1048                 spec_l4->tcp_udp.src_port = flow_spec->src_port;
1049                 spec_l4->tcp_udp.dst_port = flow_spec->dst_port;
1050                 if (spec_l4->tcp_udp.src_port)
1051                         spec_l4->tcp_udp.src_port_msk =
1052                                                 MLX4_BE_SHORT_MASK;
1053                 if (spec_l4->tcp_udp.dst_port)
1054                         spec_l4->tcp_udp.dst_port_msk =
1055                                                 MLX4_BE_SHORT_MASK;
1056
1057                 switch (flow_spec->l4_protocol) {
1058                 case IBV_FLOW_L4_UDP:
1059                         spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
1060                         break;
1061                 case IBV_FLOW_L4_TCP:
1062                         spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
1063                         break;
1064                 default:
1065                         dev_err(dev->dma_device,
1066                                 "Unsupported l4 protocol.\n");
1067                         kfree(spec_l4);
1068                         return -EPROTONOSUPPORT;
1069                 }
1070                 list_add_tail(&spec_l4->list, rule_list_h);
1071         }
1072         return 0;
1073 }
1074
1075 static int __mlx4_ib_flow_attach(struct mlx4_ib_dev *mdev,
1076                                  struct mlx4_ib_qp *mqp,
1077                                  struct ib_flow_spec *flow_spec,
1078                                  int priority, int lock_qp)
1079 {
1080         u64 reg_id = 0;
1081         int err = 0;
1082         struct mlx4_cm_steering *cm_flow;
1083         struct mlx4_spec_list *spec, *tmp_spec;
1084
1085         struct mlx4_net_trans_rule rule =
1086         {       .queue_mode = MLX4_NET_TRANS_Q_FIFO,
1087                 .exclusive = 0,
1088         };
1089
1090         rule.promisc_mode = flow_spec->rule_type;
1091         rule.port = mqp->port;
1092         rule.qpn = mqp->mqp.qpn;
1093         INIT_LIST_HEAD(&rule.list);
1094
1095         cm_flow = kmalloc(sizeof(*cm_flow), GFP_KERNEL);
1096         if (!cm_flow)
1097                 return -ENOMEM;
1098
1099         if (rule.promisc_mode == MLX4_FS_REGULAR) {
1100                 rule.allow_loopback = !flow_spec->block_mc_loopback;
1101                 rule.priority = MLX4_DOMAIN_UVERBS | priority;
1102                 err = flow_spec_to_net_rule(&mdev->ib_dev, flow_spec,
1103                                             &rule.list);
1104                 if (err)
1105                         goto free_list;
1106         }
1107
1108         err = mlx4_flow_attach(mdev->dev, &rule, &reg_id);
1109         if (err)
1110                 goto free_list;
1111
1112         memcpy(&cm_flow->spec, flow_spec, sizeof(*flow_spec));
1113         cm_flow->reg_id = reg_id;
1114
1115         if (lock_qp)
1116                 mutex_lock(&mqp->mutex);
1117         list_add(&cm_flow->list, &mqp->rules_list);
1118         if (lock_qp)
1119                 mutex_unlock(&mqp->mutex);
1120
1121 free_list:
1122         list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
1123                 list_del(&spec->list);
1124                 kfree(spec);
1125         }
1126         if (err) {
1127                 kfree(cm_flow);
1128                 dev_err(mdev->ib_dev.dma_device,
1129                         "Failed to attach flow steering rule\n");
1130         }
1131         return err;
1132 }
1133
1134 static int __mlx4_ib_flow_detach(struct mlx4_ib_dev *mdev,
1135                                  struct mlx4_ib_qp *mqp,
1136                                  struct ib_flow_spec *spec, int priority,
1137                                  int lock_qp)
1138 {
1139         struct mlx4_cm_steering *cm_flow;
1140         int ret;
1141
1142         if (lock_qp)
1143                 mutex_lock(&mqp->mutex);
1144         list_for_each_entry(cm_flow, &mqp->rules_list, list) {
1145                 if (!memcmp(&cm_flow->spec, spec, sizeof(*spec))) {
1146                         list_del(&cm_flow->list);
1147                         break;
1148                 }
1149         }
1150         if (lock_qp)
1151                 mutex_unlock(&mqp->mutex);
1152
1153         if (&cm_flow->list == &mqp->rules_list) {
1154                 dev_err(mdev->ib_dev.dma_device, "Couldn't find reg_id for flow spec. "
1155                         "Steering rule is left attached\n");
1156                 return -EINVAL;
1157         }
1158
1159         ret = mlx4_flow_detach(mdev->dev, cm_flow->reg_id);
1160
1161         kfree(cm_flow);
1162         return ret;
1163 }
1164
1165 static int mlx4_ib_flow_attach(struct ib_qp *qp, struct ib_flow_spec *flow_spec,
1166                                int priority)
1167 {
1168         return __mlx4_ib_flow_attach(to_mdev(qp->device), to_mqp(qp),
1169                                      flow_spec, priority, 1);
1170 }
1171
1172 static int mlx4_ib_flow_detach(struct ib_qp *qp, struct ib_flow_spec *spec,
1173                                int priority)
1174 {
1175         return __mlx4_ib_flow_detach(to_mdev(qp->device), to_mqp(qp),
1176                                      spec, priority, 1);
1177 }
1178
1179 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1180 {
1181         struct mlx4_ib_gid_entry *ge;
1182         struct mlx4_ib_gid_entry *tmp;
1183         struct mlx4_ib_gid_entry *ret = NULL;
1184
1185         list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1186                 if (!memcmp(raw, ge->gid.raw, 16)) {
1187                         ret = ge;
1188                         break;
1189                 }
1190         }
1191
1192         return ret;
1193 }
1194
1195 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1196 {
1197         int err;
1198         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1199         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1200         u8 mac[6];
1201         struct net_device *ndev;
1202         struct mlx4_ib_gid_entry *ge;
1203         u64 reg_id = 0;
1204
1205         if (mdev->dev->caps.steering_mode ==
1206             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1207                 struct mlx4_ib_steering *ib_steering;
1208
1209                 mutex_lock(&mqp->mutex);
1210                 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1211                         if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1212                                 list_del(&ib_steering->list);
1213                                 break;
1214                         }
1215                 }
1216                 mutex_unlock(&mqp->mutex);
1217                 if (&ib_steering->list == &mqp->steering_rules) {
1218                         pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1219                         return -EINVAL;
1220                 }
1221                 reg_id = ib_steering->reg_id;
1222                 kfree(ib_steering);
1223         }
1224
1225         err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1226                                     MLX4_PROT_IB_IPV6, reg_id);
1227         if (err)
1228                 return err;
1229
1230         mutex_lock(&mqp->mutex);
1231         ge = find_gid_entry(mqp, gid->raw);
1232         if (ge) {
1233                 spin_lock(&mdev->iboe.lock);
1234                 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1235                 if (ndev)
1236                         dev_hold(ndev);
1237                 spin_unlock(&mdev->iboe.lock);
1238                 rdma_get_mcast_mac((struct in6_addr *)gid, mac);
1239                 if (ndev) {
1240                         rtnl_lock();
1241                         dev_mc_delete(mdev->iboe.netdevs[ge->port - 1], mac, 6, 0);
1242                         rtnl_unlock();
1243                         dev_put(ndev);
1244                 }
1245                 list_del(&ge->list);
1246                 kfree(ge);
1247         } else
1248                 pr_warn("could not find mgid entry\n");
1249
1250         mutex_unlock(&mqp->mutex);
1251
1252         return 0;
1253 }
1254
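/*
 * Query NodeDescription and NodeInfo through MAD_IFC to initialize
 * node_desc, node_guid and the HCA revision id.
 */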
1255 static int init_node_data(struct mlx4_ib_dev *dev)
1256 {
1257         struct ib_smp *in_mad  = NULL;
1258         struct ib_smp *out_mad = NULL;
1259         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1260         int err = -ENOMEM;
1261
1262         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
1263         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1264         if (!in_mad || !out_mad)
1265                 goto out;
1266
1267         init_query_mad(in_mad);
1268         in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1269         if (mlx4_is_master(dev->dev))
1270                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1271
1272         err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1273         if (err)
1274                 goto out;
1275
1276         memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1277
1278         in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1279
1280         err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1281         if (err)
1282                 goto out;
1283
1284         dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1285         memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1286
1287 out:
1288         kfree(in_mad);
1289         kfree(out_mad);
1290         return err;
1291 }
1292
1293 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1294                         char *buf)
1295 {
1296         struct mlx4_ib_dev *dev =
1297                 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1298         return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
1299 }
1300
1301 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1302                            char *buf)
1303 {
1304         struct mlx4_ib_dev *dev =
1305                 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1306         return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
1307                        (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
1308                        (int) dev->dev->caps.fw_ver & 0xffff);
1309 }
1310
1311 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1312                         char *buf)
1313 {
1314         struct mlx4_ib_dev *dev =
1315                 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1316         return sprintf(buf, "%x\n", dev->dev->rev_id);
1317 }
1318
1319 static ssize_t show_board(struct device *device, struct device_attribute *attr,
1320                           char *buf)
1321 {
1322         struct mlx4_ib_dev *dev =
1323                 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1324         return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
1325                        dev->dev->board_id);
1326 }
1327
1328 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
1329 static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
1330 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
1331 static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
1332
1333 static struct device_attribute *mlx4_class_attributes[] = {
1334         &dev_attr_hw_rev,
1335         &dev_attr_fw_ver,
1336         &dev_attr_hca_type,
1337         &dev_attr_board_id
1338 };
1339
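/*
 * Build the low 64 bits of a link-local GID from the netdev MAC address in
 * modified EUI-64 form: the upper and lower three MAC bytes around either
 * the VLAN id or 0xfffe, with the universal/local bit flipped.
 */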
1340 static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
1341 {
1342 #ifdef __linux__
1343         memcpy(eui, dev->dev_addr, 3);
1344         memcpy(eui + 5, dev->dev_addr + 3, 3);
1345 #else
1346         memcpy(eui, IF_LLADDR(dev), 3);
1347         memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);
1348 #endif
1349         if (vlan_id < 0x1000) {
1350                 eui[3] = vlan_id >> 8;
1351                 eui[4] = vlan_id & 0xff;
1352         } else {
1353                 eui[3] = 0xff;
1354                 eui[4] = 0xfe;
1355         }
1356         eui[0] ^= 2;
1357 }
1358
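/*
 * Deferred work: push the cached per-port GID table to the device with
 * SET_PORT and dispatch IB_EVENT_GID_CHANGE on success.
 */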
1359 static void update_gids_task(struct work_struct *work)
1360 {
1361         struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
1362         struct mlx4_cmd_mailbox *mailbox;
1363         union ib_gid *gids;
1364         int err;
1365         struct mlx4_dev *dev = gw->dev->dev;
1366
1367         mailbox = mlx4_alloc_cmd_mailbox(dev);
1368         if (IS_ERR(mailbox)) {
1369                 pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
1370                 return;
1371         }
1372
1373         gids = mailbox->buf;
1374         memcpy(gids, gw->gids, sizeof gw->gids);
1375
1376         err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1377                        1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1378                        MLX4_CMD_WRAPPED);
1379         if (err)
1380                 pr_warn("set port command failed\n");
1381         else {
1382                 memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
1383                 mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
1384         }
1385
1386         mlx4_free_cmd_mailbox(dev, mailbox);
1387         kfree(gw);
1388 }
1389
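/*
 * Rebuild the RoCE GID table for a port: derive a link-local GID for the
 * port's netdev and each VLAN on top of it, keep entries still in use,
 * clear stale ones and queue update_gids_task() if anything changed.
 */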
1390 static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
1391 {
1392         struct net_device *ndev = dev->iboe.netdevs[port - 1];
1393         struct update_gid_work *work;
1394         struct net_device *tmp;
1395         int i;
1396         u8 *hits;
1397         union ib_gid gid;
1398         int index_free;
1399         int found;
1400         int need_update = 0;
1401         int max_gids;
1402         u16 vid;
1403
1404         work = kzalloc(sizeof *work, GFP_ATOMIC);
1405         if (!work)
1406                 return -ENOMEM;
1407
1408         hits = kzalloc(128, GFP_ATOMIC);
1409         if (!hits) {
1410                 kfree(work);
1411                 return -ENOMEM;
1412         }
1413
1414         max_gids = dev->dev->caps.gid_table_len[port];
1415
1416 #ifdef __linux__
1417         rcu_read_lock();
1418         for_each_netdev_rcu(&init_net, tmp) {
1419 #else
1420         IFNET_RLOCK();
1421         TAILQ_FOREACH(tmp, &V_ifnet, if_link) {
1422 #endif
1423                 if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
1424                         gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
1425                         vid = rdma_vlan_dev_vlan_id(tmp);
1426                         mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
1427                         found = 0;
1428                         index_free = -1;
1429                         for (i = 0; i < max_gids; ++i) {
1430                                 if (index_free < 0 &&
1431                                     !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
1432                                         index_free = i;
1433                                 if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
1434                                         hits[i] = 1;
1435                                         found = 1;
1436                                         break;
1437                                 }
1438                         }
1439
1440                         if (!found) {
1441                                 if (tmp == ndev &&
1442                                     (memcmp(&dev->iboe.gid_table[port - 1][0],
1443                                             &gid, sizeof gid) ||
1444                                      !memcmp(&dev->iboe.gid_table[port - 1][0],
1445                                              &zgid, sizeof gid))) {
1446                                         dev->iboe.gid_table[port - 1][0] = gid;
1447                                         ++need_update;
1448                                         hits[0] = 1;
1449                                 } else if (index_free >= 0) {
1450                                         dev->iboe.gid_table[port - 1][index_free] = gid;
1451                                         hits[index_free] = 1;
1452                                         ++need_update;
1453                                 }
1454                         }
1455                 }
1456 #ifdef __linux__
1457         }
1458         rcu_read_unlock();
1459 #else
1460         }
1461         IFNET_RUNLOCK();
1462 #endif
1463
1464         for (i = 0; i < max_gids; ++i)
1465                 if (!hits[i]) {
1466                         if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
1467                                 ++need_update;
1468                         dev->iboe.gid_table[port - 1][i] = zgid;
1469                 }
1470
1471         if (need_update) {
1472                 memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
1473                 INIT_WORK(&work->work, update_gids_task);
1474                 work->port = port;
1475                 work->dev = dev;
1476                 queue_work(wq, &work->work);
1477         } else
1478                 kfree(work);
1479
1480         kfree(hits);
1481         return 0;
1482 }
1483
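/*
 * Refresh the port's GID table on a netdev UP (or, on Linux, CHANGEADDR)
 * event; on DOWN, refresh and drop the cached netdev pointer.
 */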
1484 static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
1485 {
1486         switch (event) {
1487         case NETDEV_UP:
1488 #ifdef __linux__
1489         case NETDEV_CHANGEADDR:
1490 #endif
1491                 update_ipv6_gids(dev, port, 0);
1492                 break;
1493
1494         case NETDEV_DOWN:
1495                 update_ipv6_gids(dev, port, 1);
1496                 dev->iboe.netdevs[port - 1] = NULL;
1497         }
1498 }
1499
1500 static void netdev_added(struct mlx4_ib_dev *dev, int port)
1501 {
1502         update_ipv6_gids(dev, port, 0);
1503 }
1504
1505 static void netdev_removed(struct mlx4_ib_dev *dev, int port)
1506 {
1507         update_ipv6_gids(dev, port, 1);
1508 }
1509
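/*
 * Netdevice notifier callback: re-resolve the Ethernet netdev backing each
 * IBoE port, trigger GID updates when that binding changes, and forward the
 * event to whichever port owns (or is the real device under) the netdev
 * that generated it.
 */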
1510 static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
1511                                 void *ptr)
1512 {
1513         struct net_device *dev = ptr;
1514         struct mlx4_ib_dev *ibdev;
1515         struct net_device *oldnd;
1516         struct mlx4_ib_iboe *iboe;
1517         int port;
1518
1519 #ifdef __linux__
1520         if (!net_eq(dev_net(dev), &init_net))
1521                 return NOTIFY_DONE;
1522 #endif
1523
1524         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1525         iboe = &ibdev->iboe;
1526
1527         spin_lock(&iboe->lock);
1528         mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1529                 oldnd = iboe->netdevs[port - 1];
1530                 iboe->netdevs[port - 1] =
1531                         mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1532                 if (oldnd != iboe->netdevs[port - 1]) {
1533                         if (iboe->netdevs[port - 1])
1534                                 netdev_added(ibdev, port);
1535                         else
1536                                 netdev_removed(ibdev, port);
1537                 }
1538         }
1539
1540         if (dev == iboe->netdevs[0] ||
1541             (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
1542                 handle_en_event(ibdev, 1, event);
1543         else if (dev == iboe->netdevs[1]
1544                  || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
1545                 handle_en_event(ibdev, 2, event);
1546
1547         spin_unlock(&iboe->lock);
1548
1549         return NOTIFY_DONE;
1550 }
1551
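/*
 * On the master, build each slave's virt-to-phys pkey mapping (identity for
 * the master and for index 0, the last table entry otherwise), sync it to
 * the device, and seed the phys pkey cache (entry 0 = 0xFFFF, rest cleared).
 */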
1552 static void init_pkeys(struct mlx4_ib_dev *ibdev)
1553 {
1554         int port;
1555         int slave;
1556         int i;
1557
1558         if (mlx4_is_master(ibdev->dev)) {
1559                 for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
1560                         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1561                                 for (i = 0;
1562                                      i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1563                                      ++i) {
1564                                         ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
1565                                         /* master has the identity virt2phys pkey mapping */
1566                                                 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
1567                                                         ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
1568                                         mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
1569                                                              ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
1570                                 }
1571                         }
1572                 }
1573                 /* initialize pkey cache */
1574                 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1575                         for (i = 0;
1576                              i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1577                              ++i)
1578                                 ibdev->pkeys.phys_pkey_cache[port-1][i] =
1579                                         (i) ? 0 : 0xFFFF;
1580                 }
1581         }
1582 }
1583
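/*
 * Carve extra completion EQs for each IB port out of the device's comp_pool
 * (comp_pool / num_ports, rounded down to a power of two, per port) and
 * build ibdev->eq_table; entries that cannot be assigned fall back to the
 * legacy shared vectors, which are also appended at the end of the table.
 */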
1584 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1585 {
1586         char name[32];
1587         int eq_per_port = 0;
1588         int added_eqs = 0;
1589         int total_eqs = 0;
1590         int i, j, eq;
1591
1592         /* Legacy mode or comp_pool is not large enough */
1593         if (dev->caps.comp_pool == 0 ||
1594             dev->caps.num_ports > dev->caps.comp_pool)
1595                 return;
1596
1597         eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
1598                                         dev->caps.num_ports);
1599
1600         /* Init eq table */
1601         added_eqs = 0;
1602         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
1603                 added_eqs += eq_per_port;
1604
1605         total_eqs = dev->caps.num_comp_vectors + added_eqs;
1606
1607         ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
1608         if (!ibdev->eq_table)
1609                 return;
1610
1611         ibdev->eq_added = added_eqs;
1612
1613         eq = 0;
1614         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
1615                 for (j = 0; j < eq_per_port; j++) {
1616                         snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%d:%d:%d:%d", i, j,
1617                             pci_get_domain(dev->pdev->dev.bsddev),
1618                             pci_get_bus(dev->pdev->dev.bsddev),
1619                             PCI_SLOT(dev->pdev->devfn),
1620                             PCI_FUNC(dev->pdev->devfn));
1621
1622                         /* Set IRQ for specific name (per ring) */
1623                         if (mlx4_assign_eq(dev, name,
1624                                            &ibdev->eq_table[eq])) {
1625                                 /* Use legacy (same as mlx4_en driver) */
1626                                 pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
1627                                 ibdev->eq_table[eq] =
1628                                         (eq % dev->caps.num_comp_vectors);
1629                         }
1630                         eq++;
1631                 }
1632         }
1633
1634         /* Fill the rest of the vector with legacy EQs */
1635         for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
1636                 ibdev->eq_table[eq++] = i;
1637
1638         /* Advertise the new number of EQs to clients */
1639         ibdev->ib_dev.num_comp_vectors = total_eqs;
1640 }
1641
1642 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1643 {
1644         int i;
1645
1646         /* no additional eqs were added */
1647         if (!ibdev->eq_table)
1648                 return;
1649
1650         /* Reset the advertised EQ number */
1651         ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
1652
1653         /* Free only the added eqs */
1654         for (i = 0; i < ibdev->eq_added; i++) {
1655                 /* Don't free legacy eqs if used */
1656                 if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
1657                         continue;
1658                 mlx4_release_eq(dev, ibdev->eq_table[i]);
1659         }
1660
1661         kfree(ibdev->eq_table);
1662 }
1663
1664 /*
1665  * Create a show function for _name and a device_attribute struct
1666  * pointing to that function.
1667  */
1668 #define DEVICE_DIAG_RPRT_ATTR(_name, _offset, _op_mod)          \
1669 static ssize_t show_rprt_##_name(struct device *dev,            \
1670                                  struct device_attribute *attr, \
1671                                  char *buf){                    \
1672         return show_diag_rprt(dev, buf, _offset, _op_mod);      \
1673 }                                                               \
1674 static DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);
1675
1676 #define MLX4_DIAG_RPRT_CLEAR_DIAGS 3
1677
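/*
 * Back-end for the diag_counters sysfs attributes: query a single diagnostic
 * counter at the given offset/op modifier and print it.  clear_diag_counters()
 * below issues op modifier MLX4_DIAG_RPRT_CLEAR_DIAGS to clear the counters.
 */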
1678 static ssize_t show_diag_rprt(struct device *device, char *buf,
1679                              u32 offset, u8 op_modifier)
1680 {
1681         ssize_t ret;
1682         u32 counter_offset = offset;
1683         u32 diag_counter = 0;
1684         struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
1685                                                ib_dev.dev);
1686
1687         ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
1688                                        &counter_offset, &diag_counter);
1689         if (ret)
1690                 return ret;
1691
1692         return sprintf(buf, "%d\n", diag_counter);
1693 }
1694
1695 static ssize_t clear_diag_counters(struct device *device,
1696                                    struct device_attribute *attr,
1697                                    const char *buf, size_t length)
1698 {
1699         ssize_t ret;
1700         struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
1701                                                ib_dev.dev);
1702
1703         ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
1704                                        NULL, NULL);
1705         if (ret)
1706                 return ret;
1707
1708         return length;
1709 }
1710
1711 DEVICE_DIAG_RPRT_ATTR(rq_num_lle        , 0x00, 2);
1712 DEVICE_DIAG_RPRT_ATTR(sq_num_lle        , 0x04, 2);
1713 DEVICE_DIAG_RPRT_ATTR(rq_num_lqpoe      , 0x08, 2);
1714 DEVICE_DIAG_RPRT_ATTR(sq_num_lqpoe      , 0x0C, 2);
1715 DEVICE_DIAG_RPRT_ATTR(rq_num_lpe        , 0x18, 2);
1716 DEVICE_DIAG_RPRT_ATTR(sq_num_lpe        , 0x1C, 2);
1717 DEVICE_DIAG_RPRT_ATTR(rq_num_wrfe       , 0x20, 2);
1718 DEVICE_DIAG_RPRT_ATTR(sq_num_wrfe       , 0x24, 2);
1719 DEVICE_DIAG_RPRT_ATTR(sq_num_mwbe       , 0x2C, 2);
1720 DEVICE_DIAG_RPRT_ATTR(sq_num_bre        , 0x34, 2);
1721 DEVICE_DIAG_RPRT_ATTR(rq_num_lae        , 0x38, 2);
1722 DEVICE_DIAG_RPRT_ATTR(sq_num_rire       , 0x44, 2);
1723 DEVICE_DIAG_RPRT_ATTR(rq_num_rire       , 0x48, 2);
1724 DEVICE_DIAG_RPRT_ATTR(sq_num_rae        , 0x4C, 2);
1725 DEVICE_DIAG_RPRT_ATTR(rq_num_rae        , 0x50, 2);
1726 DEVICE_DIAG_RPRT_ATTR(sq_num_roe        , 0x54, 2);
1727 DEVICE_DIAG_RPRT_ATTR(sq_num_tree       , 0x5C, 2);
1728 DEVICE_DIAG_RPRT_ATTR(sq_num_rree       , 0x64, 2);
1729 DEVICE_DIAG_RPRT_ATTR(rq_num_rnr        , 0x68, 2);
1730 DEVICE_DIAG_RPRT_ATTR(sq_num_rnr        , 0x6C, 2);
1731 DEVICE_DIAG_RPRT_ATTR(rq_num_oos        , 0x100, 2);
1732 DEVICE_DIAG_RPRT_ATTR(sq_num_oos        , 0x104, 2);
1733 DEVICE_DIAG_RPRT_ATTR(rq_num_mce        , 0x108, 2);
1734 DEVICE_DIAG_RPRT_ATTR(rq_num_udsdprd    , 0x118, 2);
1735 DEVICE_DIAG_RPRT_ATTR(rq_num_ucsdprd    , 0x120, 2);
1736 DEVICE_DIAG_RPRT_ATTR(num_cqovf         , 0x1A0, 2);
1737 DEVICE_DIAG_RPRT_ATTR(num_eqovf         , 0x1A4, 2);
1738 DEVICE_DIAG_RPRT_ATTR(num_baddb         , 0x1A8, 2);
1739
1740 static DEVICE_ATTR(clear_diag, S_IWUSR, NULL, clear_diag_counters);
1741
1742 static struct attribute *diag_rprt_attrs[] = {
1743         &dev_attr_rq_num_lle.attr,
1744         &dev_attr_sq_num_lle.attr,
1745         &dev_attr_rq_num_lqpoe.attr,
1746         &dev_attr_sq_num_lqpoe.attr,
1747         &dev_attr_rq_num_lpe.attr,
1748         &dev_attr_sq_num_lpe.attr,
1749         &dev_attr_rq_num_wrfe.attr,
1750         &dev_attr_sq_num_wrfe.attr,
1751         &dev_attr_sq_num_mwbe.attr,
1752         &dev_attr_sq_num_bre.attr,
1753         &dev_attr_rq_num_lae.attr,
1754         &dev_attr_sq_num_rire.attr,
1755         &dev_attr_rq_num_rire.attr,
1756         &dev_attr_sq_num_rae.attr,
1757         &dev_attr_rq_num_rae.attr,
1758         &dev_attr_sq_num_roe.attr,
1759         &dev_attr_sq_num_tree.attr,
1760         &dev_attr_sq_num_rree.attr,
1761         &dev_attr_rq_num_rnr.attr,
1762         &dev_attr_sq_num_rnr.attr,
1763         &dev_attr_rq_num_oos.attr,
1764         &dev_attr_sq_num_oos.attr,
1765         &dev_attr_rq_num_mce.attr,
1766         &dev_attr_rq_num_udsdprd.attr,
1767         &dev_attr_rq_num_ucsdprd.attr,
1768         &dev_attr_num_cqovf.attr,
1769         &dev_attr_num_eqovf.attr,
1770         &dev_attr_num_baddb.attr,
1771         &dev_attr_clear_diag.attr,
1772         NULL
1773 };
1774
1775 static struct attribute_group diag_counters_group = {
1776         .name  = "diag_counters",
1777         .attrs  = diag_rprt_attrs
1778 };
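/*
 * The group above is registered on the IB device's sysfs kobject in
 * mlx4_ib_add(), so on Linux the counters typically show up under
 * /sys/class/infiniband/mlx4_<N>/diag_counters/ (path shown for illustration).
 */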
1779
1780 #ifdef __linux__
1781 static int mlx4_ib_proc_init(void)
1782 {
1783         /* Create the procfs directories /proc/driver/mlx4_ib and
1784          * /proc/driver/mlx4_ib/mrs for later use by the driver.
1785          */
1786         int err;
1787
1788         mlx4_ib_driver_dir_entry = proc_mkdir(MLX4_IB_DRIVER_PROC_DIR_NAME,
1789                                 NULL);
1790         if (!mlx4_ib_driver_dir_entry) {
1791                 pr_err("mlx4_ib_proc_init has failed for %s\n",
1792                        MLX4_IB_DRIVER_PROC_DIR_NAME);
1793                 err = -ENODEV;
1794                 goto error;
1795         }
1796
1797         mlx4_mrs_dir_entry = proc_mkdir(MLX4_IB_MRS_PROC_DIR_NAME,
1798                                         mlx4_ib_driver_dir_entry);
1799         if (!mlx4_mrs_dir_entry) {
1800                 pr_err("mlx4_ib_proc_init has failed for %s\n",
1801                        MLX4_IB_MRS_PROC_DIR_NAME);
1802                 err = -ENODEV;
1803                 goto remove_entry;
1804         }
1805
1806         return 0;
1807
1808 remove_entry:
1809         remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME,
1810                                 NULL);
1811 error:
1812         return err;
1813 }
1814 #endif
1815
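/*
 * Parse the dev_assign_str module parameter into the dr[] table.  The
 * expected format is a comma-separated list of hexadecimal
 * "bus:slot.func-index" entries, e.g. "03:00.0-0,03:00.1-1" (example values
 * only); a parse error or duplicate index discards the whole parameter.
 */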
1816 static void init_dev_assign(void)
1817 {
1818         int bus, slot, fn, ib_idx;
1819         char *p = dev_assign_str, *t;
1820         char curr_val[32] = {0};
1821         int ret;
1822         int j, i = 0;
1823
1824         memset(dr, 0, sizeof dr);
1825
1826         if (dev_assign_str[0] == 0)
1827                 return;
1828
1829         while (strlen(p)) {
1830                 ret = sscanf(p, "%02x:%02x.%x-%x", &bus, &slot, &fn, &ib_idx);
1831                 if (ret != 4 || ib_idx < 0)
1832                         goto err;
1833
1834                 for (j = 0; j < i; j++)
1835                         if (dr[j].nr == ib_idx)
1836                                 goto err;
1837
1838                 dr[i].bus = bus;
1839                 dr[i].dev = slot;
1840                 dr[i].func = fn;
1841                 dr[i].nr = ib_idx;
1842
1843                 t = strchr(p, ',');
1844                 sprintf(curr_val, "%02x:%02x.%x-%x", bus, slot, fn, ib_idx);
1845                 if ((!t) && strlen(p) == strlen(curr_val))
1846                         return;
1847
1848                 if (!t || (t + 1) >= dev_assign_str + sizeof dev_assign_str)
1849                         goto err;
1850
1851                 ++i;
1852                 if (i >= MAX_DR)
1853                         goto err;
1854
1855                 p = t + 1;
1856         }
1857
1858         return;
1859 err:
1860         memset(dr, 0, sizeof dr);
1861         printk(KERN_WARNING "mlx4_ib: The value of the 'dev_assign_str' parameter "
1862                             "is incorrect. The parameter value is discarded!\n");
1863 }
1864
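/*
 * mlx4 core "add" callback: allocate the ib_device, PD, UAR and doorbell
 * mapping, fill in the verbs entry points and uverbs command mask, allocate
 * per-port counters, reserve a steerable UC QPN range when device-managed
 * flow steering is active, then register with the IB core and set up MADs,
 * SR-IOV paravirtualization, the netdev notifier and the sysfs attributes.
 */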
1865 static void *mlx4_ib_add(struct mlx4_dev *dev)
1866 {
1867         struct mlx4_ib_dev *ibdev;
1868         int num_ports = 0;
1869         int i, j;
1870         int err;
1871         struct mlx4_ib_iboe *iboe;
1872
1873         printk(KERN_INFO "%s", mlx4_ib_version);
1874
1875         mlx4_foreach_ib_transport_port(i, dev)
1876                 num_ports++;
1877
1878         /* No point in registering a device with no ports... */
1879         if (num_ports == 0)
1880                 return NULL;
1881
1882         ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
1883         if (!ibdev) {
1884                 dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
1885                 return NULL;
1886         }
1887
1888         iboe = &ibdev->iboe;
1889
1890         if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
1891                 goto err_dealloc;
1892
1893         if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
1894                 goto err_pd;
1895
1896         ibdev->priv_uar.map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT,
1897                 PAGE_SIZE);
1898
1899         if (!ibdev->priv_uar.map)
1900                 goto err_uar;
1901
1902         MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
1903
1904         ibdev->dev = dev;
1905
1906         strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
1907         ibdev->ib_dev.owner             = THIS_MODULE;
1908         ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
1909         ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
1910         ibdev->num_ports                = num_ports;
1911         ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
1912         ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
1913         ibdev->ib_dev.dma_device        = &dev->pdev->dev;
1914
1915         if (dev->caps.userspace_caps)
1916                 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
1917         else
1918                 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
1919
1920         ibdev->ib_dev.uverbs_cmd_mask   =
1921                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
1922                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
1923                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
1924                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
1925                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
1926                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
1927                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
1928                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1929                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
1930                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
1931                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
1932                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
1933                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
1934                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
1935                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
1936                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
1937                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
1938                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
1939                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
1940                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
1941                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
1942                 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
1943                 (1ull << IB_USER_VERBS_CMD_OPEN_QP)             |
1944                 (1ull << IB_USER_VERBS_CMD_ATTACH_FLOW)         |
1945                 (1ull << IB_USER_VERBS_CMD_DETACH_FLOW)         |
1946                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
1947
1948         ibdev->ib_dev.query_device      = mlx4_ib_query_device;
1949         ibdev->ib_dev.query_port        = mlx4_ib_query_port;
1950         ibdev->ib_dev.get_link_layer    = mlx4_ib_port_link_layer;
1951         ibdev->ib_dev.query_gid         = mlx4_ib_query_gid;
1952         ibdev->ib_dev.query_pkey        = mlx4_ib_query_pkey;
1953         ibdev->ib_dev.modify_device     = mlx4_ib_modify_device;
1954         ibdev->ib_dev.modify_port       = mlx4_ib_modify_port;
1955         ibdev->ib_dev.alloc_ucontext    = mlx4_ib_alloc_ucontext;
1956         ibdev->ib_dev.dealloc_ucontext  = mlx4_ib_dealloc_ucontext;
1957         ibdev->ib_dev.mmap              = mlx4_ib_mmap;
1958 #ifdef __linux__
1959         ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
1960 #endif
1961         ibdev->ib_dev.alloc_pd          = mlx4_ib_alloc_pd;
1962         ibdev->ib_dev.dealloc_pd        = mlx4_ib_dealloc_pd;
1963         ibdev->ib_dev.create_ah         = mlx4_ib_create_ah;
1964         ibdev->ib_dev.query_ah          = mlx4_ib_query_ah;
1965         ibdev->ib_dev.destroy_ah        = mlx4_ib_destroy_ah;
1966         ibdev->ib_dev.create_srq        = mlx4_ib_create_srq;
1967         ibdev->ib_dev.modify_srq        = mlx4_ib_modify_srq;
1968         ibdev->ib_dev.query_srq         = mlx4_ib_query_srq;
1969         ibdev->ib_dev.destroy_srq       = mlx4_ib_destroy_srq;
1970         ibdev->ib_dev.post_srq_recv     = mlx4_ib_post_srq_recv;
1971         ibdev->ib_dev.create_qp         = mlx4_ib_create_qp;
1972         ibdev->ib_dev.modify_qp         = mlx4_ib_modify_qp;
1973         ibdev->ib_dev.query_qp          = mlx4_ib_query_qp;
1974         ibdev->ib_dev.destroy_qp        = mlx4_ib_destroy_qp;
1975         ibdev->ib_dev.post_send         = mlx4_ib_post_send;
1976         ibdev->ib_dev.post_recv         = mlx4_ib_post_recv;
1977         ibdev->ib_dev.create_cq         = mlx4_ib_create_cq;
1978         ibdev->ib_dev.modify_cq         = mlx4_ib_modify_cq;
1979         ibdev->ib_dev.resize_cq         = mlx4_ib_resize_cq;
1980         ibdev->ib_dev.destroy_cq        = mlx4_ib_destroy_cq;
1981         ibdev->ib_dev.poll_cq           = mlx4_ib_poll_cq;
1982         ibdev->ib_dev.req_notify_cq     = mlx4_ib_arm_cq;
1983         ibdev->ib_dev.get_dma_mr        = mlx4_ib_get_dma_mr;
1984         ibdev->ib_dev.reg_user_mr       = mlx4_ib_reg_user_mr;
1985         ibdev->ib_dev.dereg_mr          = mlx4_ib_dereg_mr;
1986         ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
1987         ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
1988         ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
1989         ibdev->ib_dev.attach_mcast      = mlx4_ib_mcg_attach;
1990         ibdev->ib_dev.detach_mcast      = mlx4_ib_mcg_detach;
1991         ibdev->ib_dev.attach_flow       = mlx4_ib_flow_attach;
1992         ibdev->ib_dev.detach_flow       = mlx4_ib_flow_detach;
1993         ibdev->ib_dev.process_mad       = mlx4_ib_process_mad;
1994
1995         if (!mlx4_is_slave(ibdev->dev)) {
1996                 ibdev->ib_dev.alloc_fmr         = mlx4_ib_fmr_alloc;
1997                 ibdev->ib_dev.map_phys_fmr      = mlx4_ib_map_phys_fmr;
1998                 ibdev->ib_dev.unmap_fmr         = mlx4_ib_unmap_fmr;
1999                 ibdev->ib_dev.dealloc_fmr       = mlx4_ib_fmr_dealloc;
2000         }
2001
2002         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2003                 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2004                 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2005                 ibdev->ib_dev.uverbs_cmd_mask |=
2006                         (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2007                         (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2008         }
2009
2010         mlx4_ib_alloc_eqs(dev, ibdev);
2011
2012         spin_lock_init(&iboe->lock);
2013
2014         if (init_node_data(ibdev))
2015                 goto err_map;
2016
2017         for (i = 0; i < ibdev->num_ports; ++i) {
2018                 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2019                                                 IB_LINK_LAYER_ETHERNET) {
2020                         err = mlx4_counter_alloc(ibdev->dev, i + 1, &ibdev->counters[i]);
2021                         if (err)
2022                                 ibdev->counters[i] = -1;
2023                 } else
2024                         ibdev->counters[i] = -1;
2025         }
2026
2027         spin_lock_init(&ibdev->sm_lock);
2028         mutex_init(&ibdev->cap_mask_mutex);
2029
2030         if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2031             !mlx4_is_slave(dev)) {
2032                 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2033                 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2034                                             MLX4_IB_UC_STEER_QPN_ALIGN, &ibdev->steer_qpn_base, 0);
2035                 if (err)
2036                         goto err_counter;
2037
2038                 ibdev->ib_uc_qpns_bitmap =
2039                         kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2040                                 sizeof(long),
2041                                 GFP_KERNEL);
2042                 if (!ibdev->ib_uc_qpns_bitmap) {
2043                         dev_err(&dev->pdev->dev, "bit map alloc failed\n");
2044                         goto err_steer_qp_release;
2045                 }
2046
2047                 bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2048
2049                 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(dev, ibdev->steer_qpn_base,
2050                                 ibdev->steer_qpn_base + ibdev->steer_qpn_count - 1);
2051                 if (err)
2052                         goto err_steer_free_bitmap;
2053         }
2054
2055         if (ib_register_device(&ibdev->ib_dev, NULL))
2056                 goto err_steer_free_bitmap;
2057
2058         if (mlx4_ib_mad_init(ibdev))
2059                 goto err_reg;
2060
2061         if (mlx4_ib_init_sriov(ibdev))
2062                 goto err_mad;
2063
2064         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
2065                 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2066                 err = register_netdevice_notifier(&iboe->nb);
2067                 if (err)
2068                         goto err_sriov;
2069         }
2070
2071         for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2072                 if (device_create_file(&ibdev->ib_dev.dev,
2073                                        mlx4_class_attributes[j]))
2074                         goto err_notif;
2075         }
2076         if (sysfs_create_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group))
2077                 goto err_notif;
2078
2079         ibdev->ib_active = true;
2080
2081         if (mlx4_is_mfunc(ibdev->dev))
2082                 init_pkeys(ibdev);
2083
2084         /* create paravirt contexts for any VFs which are active */
2085         if (mlx4_is_master(ibdev->dev)) {
2086                 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2087                         if (j == mlx4_master_func_num(ibdev->dev))
2088                                 continue;
2089                         if (mlx4_is_slave_active(ibdev->dev, j))
2090                                 do_slave_init(ibdev, j, 1);
2091                 }
2092         }
2093         return ibdev;
2094
2095 err_notif:
2096         if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2097                 pr_warn("failure unregistering notifier\n");
2098         flush_workqueue(wq);
2099
2100 err_sriov:
2101         mlx4_ib_close_sriov(ibdev);
2102
2103 err_mad:
2104         mlx4_ib_mad_cleanup(ibdev);
2105
2106 err_reg:
2107         ib_unregister_device(&ibdev->ib_dev);
2108
2109 err_steer_free_bitmap:
2110         kfree(ibdev->ib_uc_qpns_bitmap);
2111
2112 err_steer_qp_release:
2113         if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED)
2114                 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2115                                 ibdev->steer_qpn_count);
2116 err_counter:
2117         for (; i; --i)
2118                 if (ibdev->counters[i - 1] != -1)
2119                         mlx4_counter_free(ibdev->dev, i, ibdev->counters[i - 1]);
2120
2121 err_map:
2122         iounmap(ibdev->priv_uar.map);
2123         mlx4_ib_free_eqs(dev, ibdev);
2124
2125 err_uar:
2126         mlx4_uar_free(dev, &ibdev->priv_uar);
2127
2128 err_pd:
2129         mlx4_pd_free(dev, ibdev->priv_pdn);
2130
2131 err_dealloc:
2132         ib_dealloc_device(&ibdev->ib_dev);
2133
2134         return NULL;
2135 }
2136
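/*
 * Allocate "count" (rounded up to a power of two) steerable UC QPNs from the
 * bitmap reserved in mlx4_ib_add(); the companion routine below releases them.
 */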
2137 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2138 {
2139         int offset;
2140
2141         WARN_ON(!dev->ib_uc_qpns_bitmap);
2142
2143         offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2144                                          dev->steer_qpn_count,
2145                                          get_count_order(count));
2146         if (offset < 0)
2147                 return offset;
2148
2149         *qpn = dev->steer_qpn_base + offset;
2150         return 0;
2151 }
2152
2153 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2154 {
2155         if (!qpn ||
2156             dev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
2157                 return;
2158
2159         BUG_ON(qpn < dev->steer_qpn_base);
2160
2161         bitmap_release_region(dev->ib_uc_qpns_bitmap,
2162                         qpn - dev->steer_qpn_base, get_count_order(count));
2163 }
2164
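/*
 * Attach or detach the IB_FLOW_IB_UC steering rule that matches the QP's own
 * QPN, used when device-managed flow steering is active.
 */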
2165 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2166                          int is_attach)
2167 {
2168         struct ib_flow_spec spec = {
2169                 .type = IB_FLOW_IB_UC,
2170                 .l2_id.ib_uc.qpn  = mqp->ibqp.qp_num,
2171         };
2172
2173         return is_attach ?
2174                 __mlx4_ib_flow_attach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0)
2175                 : __mlx4_ib_flow_detach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0);
2176 }
2177
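/*
 * mlx4 core "remove" callback: tear down everything set up by mlx4_ib_add()
 * (sysfs entries, SR-IOV, MADs, the netdev notifier, the steering QPN range,
 * counters, EQs, UAR and PD) and free the ib_device.
 */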
2178 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2179 {
2180         struct mlx4_ib_dev *ibdev = ibdev_ptr;
2181         int p, j;
2182
2183         mlx4_ib_close_sriov(ibdev);
2184         sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);
2185         mlx4_ib_mad_cleanup(ibdev);
2186
2187         for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2188                 device_remove_file(&ibdev->ib_dev.dev, mlx4_class_attributes[j]);
2189         }
2190
2191         ib_unregister_device(&ibdev->ib_dev);
2192
2193         if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2194                 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2195                                 ibdev->steer_qpn_count);
2196                 kfree(ibdev->ib_uc_qpns_bitmap);
2197         }
2198
2199         if (ibdev->iboe.nb.notifier_call) {
2200                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2201                         pr_warn("failure unregistering notifier\n");
2202                 ibdev->iboe.nb.notifier_call = NULL;
2203         }
2204         iounmap(ibdev->priv_uar.map);
2205         for (p = 0; p < ibdev->num_ports; ++p)
2206                 if (ibdev->counters[p] != -1)
2207                         mlx4_counter_free(ibdev->dev, p + 1, ibdev->counters[p]);
2208         mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2209                 mlx4_CLOSE_PORT(dev, p);
2210
2211         mlx4_ib_free_eqs(dev, ibdev);
2212
2213         mlx4_uar_free(dev, &ibdev->priv_uar);
2214         mlx4_pd_free(dev, ibdev->priv_pdn);
2215         ib_dealloc_device(&ibdev->ib_dev);
2216 }
2217
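/*
 * On the master, queue one work item per port to set up (do_init != 0) or
 * tear down the tunnel QPs for the given slave; queuing is skipped while
 * SR-IOV teardown is in progress.
 */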
2218 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2219 {
2220         struct mlx4_ib_demux_work **dm = NULL;
2221         struct mlx4_dev *dev = ibdev->dev;
2222         int i;
2223         unsigned long flags;
2224
2225         if (!mlx4_is_master(dev))
2226                 return;
2227
2228         dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
2229         if (!dm) {
2230                 pr_err("failed to allocate memory for tunneling qp update\n");
2231                 goto out;
2232         }
2233
2234         for (i = 0; i < dev->caps.num_ports; i++) {
2235                 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2236                 if (!dm[i]) {
2237                         pr_err("failed to allocate memory for tunneling qp update work struct\n");
2238                         for (i = 0; i < dev->caps.num_ports; i++) {
2239                                 if (dm[i])
2240                                         kfree(dm[i]);
2241                         }
2242                         goto out;
2243                 }
2244         }
2245         /* initialize or tear down tunnel QPs for the slave */
2246         for (i = 0; i < dev->caps.num_ports; i++) {
2247                 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2248                 dm[i]->port = i + 1;
2249                 dm[i]->slave = slave;
2250                 dm[i]->do_init = do_init;
2251                 dm[i]->dev = ibdev;
2252                 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2253                 if (!ibdev->sriov.is_going_down)
2254                         queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2255                 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2256         }
2257 out:
2258         if (dm)
2259                 kfree(dm);
2260         return;
2261 }
2262
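/*
 * mlx4 core event callback: translate core events into IB events (port
 * active/error, device fatal), hand port-management-change EQEs to a work
 * item (queued on the master, handled inline otherwise), and start or stop
 * a slave's tunnel QPs on slave init/shutdown.
 */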
2263 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2264                           enum mlx4_dev_event event, unsigned long param)
2265 {
2266         struct ib_event ibev;
2267         struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
2268         struct mlx4_eqe *eqe = NULL;
2269         struct ib_event_work *ew;
2270         int p = 0;
2271
2272         if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2273                 eqe = (struct mlx4_eqe *)param;
2274         else
2275                 p = (int) param;
2276
2277         switch (event) {
2278         case MLX4_DEV_EVENT_PORT_UP:
2279                 if (p > ibdev->num_ports)
2280                         return;
2281                 if (mlx4_is_master(dev) &&
2282                     rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
2283                         IB_LINK_LAYER_INFINIBAND) {
2284                         mlx4_ib_invalidate_all_guid_record(ibdev, p);
2285                 }
2286                 mlx4_ib_info((struct ib_device *) ibdev_ptr,
2287                              "Port %d logical link is up\n", p);
2288                 ibev.event = IB_EVENT_PORT_ACTIVE;
2289                 break;
2290
2291         case MLX4_DEV_EVENT_PORT_DOWN:
2292                 if (p > ibdev->num_ports)
2293                         return;
2294                 mlx4_ib_info((struct ib_device *) ibdev_ptr,
2295                              "Port %d logical link is down\n", p);
2296                 ibev.event = IB_EVENT_PORT_ERR;
2297                 break;
2298
2299         case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2300                 ibdev->ib_active = false;
2301                 ibev.event = IB_EVENT_DEVICE_FATAL;
2302                 break;
2303
2304         case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
2305                 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
2306                 if (!ew) {
2307                         pr_err("failed to allocate memory for events work\n");
2308                         break;
2309                 }
2310
2311                 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
2312                 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
2313                 ew->ib_dev = ibdev;
2314                 /* need to queue only for port owner, which uses GEN_EQE */
2315                 if (mlx4_is_master(dev))
2316                         queue_work(wq, &ew->work);
2317                 else
2318                         handle_port_mgmt_change_event(&ew->work);
2319                 return;
2320
2321         case MLX4_DEV_EVENT_SLAVE_INIT:
2322                 /* here, p is the slave id */
2323                 do_slave_init(ibdev, p, 1);
2324                 return;
2325
2326         case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
2327                 /* here, p is the slave id */
2328                 do_slave_init(ibdev, p, 0);
2329                 return;
2330
2331         default:
2332                 return;
2333         }
2334
2335         ibev.device           = ibdev_ptr;
2336         ibev.element.port_num = (u8) p;
2337
2338         ib_dispatch_event(&ibev);
2339 }
2340
2341 static struct mlx4_interface mlx4_ib_interface = {
2342         .add            = mlx4_ib_add,
2343         .remove         = mlx4_ib_remove,
2344         .event          = mlx4_ib_event,
2345         .protocol       = MLX4_PROT_IB_IPV6
2346 };
2347
2348 static int __init mlx4_ib_init(void)
2349 {
2350         int err;
2351
2352         wq = create_singlethread_workqueue("mlx4_ib");
2353         if (!wq)
2354                 return -ENOMEM;
2355
2356 #ifdef __linux__
2357         err = mlx4_ib_proc_init();
2358         if (err)
2359                 goto clean_wq;
2360 #endif
2361
2362         err = mlx4_ib_mcg_init();
2363         if (err)
2364                 goto clean_proc;
2365
2366         init_dev_assign();
2367
2368         err = mlx4_register_interface(&mlx4_ib_interface);
2369         if (err)
2370                 goto clean_mcg;
2371
2372         return 0;
2373
2374 clean_mcg:
2375         mlx4_ib_mcg_destroy();
2376
2377 clean_proc:
2378 #ifdef __linux__
2379         remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
2380                           mlx4_ib_driver_dir_entry);
2381         remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);
2382
2383 clean_wq:
2384 #endif
2385         destroy_workqueue(wq);
2386         return err;
2387 }
2388
2389 static void __exit mlx4_ib_cleanup(void)
2390 {
2391         mlx4_unregister_interface(&mlx4_ib_interface);
2392         mlx4_ib_mcg_destroy();
2393         destroy_workqueue(wq);
2394
2395         /* Remove proc entries */
2396 #ifdef __linux__
2397         remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
2398                                 mlx4_ib_driver_dir_entry);
2399         remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);
2400 #endif
2401
2402 }
2403
2404 module_init_order(mlx4_ib_init, SI_ORDER_MIDDLE);
2405 module_exit(mlx4_ib_cleanup);
2406
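/*
 * FreeBSD module glue: the real initialization is done by mlx4_ib_init() and
 * mlx4_ib_cleanup() above; the moduledata below only declares the module,
 * provides an empty event handler, and records its dependencies on the mlx4
 * and ibcore modules.
 */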
2407 #undef MODULE_VERSION
2408 #include <sys/module.h>
2409 static int
2410 mlx4ib_evhand(module_t mod, int event, void *arg)
2411 {
2412         return (0);
2413 }
2414
2415 static moduledata_t mlx4ib_mod = {
2416         .name = "mlx4ib",
2417         .evhand = mlx4ib_evhand,
2418 };
2419
2420 DECLARE_MODULE(mlx4ib, mlx4ib_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY);
2421 MODULE_DEPEND(mlx4ib, mlx4, 1, 1, 1);
2422 MODULE_DEPEND(mlx4ib, ibcore, 1, 1, 1);