/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_user_verbs_exp.h>
#include <rdma/ib_addr.h>

#include <dev/mlx4/driver.h>
#include <dev/mlx4/cmd.h>
#include <linux/sched.h>
#include <linux/page.h>
#include <linux/printk.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	__DATE__

#define MLX4_IB_DRIVER_PROC_DIR_NAME	"driver/mlx4_ib"
#define MLX4_IB_MRS_PROC_DIR_NAME	"mrs"
#define MLX4_IB_FLOW_MAX_PRIO	0xFFF
#define MLX4_IB_FLOW_QPN_MASK	0xFFFFFF

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_VERSION(DRV_VERSION);
int mlx4_ib_sm_guid_assign = 1;

module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");

enum {
    MAX_NUM_STR_BITMAP = 1 << 15,
};

static struct mlx4_dbdf2val_lst dev_assign_str = {
    .name    = "dev_assign_str param",
    .num_vals = 1,
    .def_val = {DEFAULT_TBL_VAL},
    .range   = {0, MAX_NUM_STR_BITMAP - 1}
};
module_param_string(dev_assign_str, dev_assign_str.str,
            sizeof(dev_assign_str.str), 0444);
MODULE_PARM_DESC(dev_assign_str,
         "Map device function numbers to IB device numbers (e.g. '0000:04:00.0-0,002b:1c:0b.a-1,...').\n"
         "\t\tHexadecimal digits for the device function (e.g. 002b:1c:0b.a) and decimal for IB device numbers (e.g. 1).\n"
         "\t\tMax supported devices - 32");
static unsigned long *dev_num_str_bitmap;
static spinlock_t dev_num_str_lock;

static const char mlx4_ib_version[] =
    DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
    DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
    struct work_struct work;
    union ib_gid gids[128];
    struct mlx4_ib_dev *dev;
    int port;
};

static int dr_active;

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, struct net_device *,
                 unsigned long);

static u8 mlx4_ib_get_dev_port(struct net_device *dev,
                   struct mlx4_ib_dev *ibdev);

static struct workqueue_struct *wq;
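/*
 * Prime an SMP MAD header for a subnet-management GET. Callers fill in
 * attr_id/attr_mod and send the MAD through mlx4_MAD_IFC(); every query
 * helper below follows this pattern.
 */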
static void init_query_mad(struct ib_smp *mad)
{
    mad->base_version = 1;
    mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
    mad->class_version = 1;
    mad->method = IB_MGMT_METHOD_GET;
}

static union ib_gid zgid;
static int check_flow_steering_support(struct mlx4_dev *dev)
{
    int eth_num_ports = 0;
    int ib_num_ports = 0;
    int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

    if (dmfs) {
        int i;
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
            eth_num_ports++;
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
            ib_num_ports++;
        dmfs &= (!ib_num_ports ||
             (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
            (eth_num_ports ||
             (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
        if (ib_num_ports && mlx4_is_mfunc(dev)) {
            dmfs = 0;
        }
    }
    return dmfs;
}
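/*
 * Device attributes come from two sources: a NodeInfo MAD queried from
 * the firmware (vendor id, hw revision and system image GUID live at
 * fixed byte offsets in out_mad->data) and the driver's cached
 * caps/quotas limits.
 */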
int mlx4_ib_query_device(struct ib_device *ibdev,
             struct ib_device_attr *props)
{
    struct mlx4_ib_dev *dev = to_mdev(ibdev);
    struct ib_smp *in_mad = NULL;
    struct ib_smp *out_mad = NULL;
    int err = -ENOMEM;

    in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
    out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
    if (!in_mad || !out_mad)
        goto out;

    init_query_mad(in_mad);
    in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

    err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
               1, NULL, NULL, in_mad, out_mad);
    if (err)
        goto out;

    memset(props, 0, sizeof *props);

    props->fw_ver = dev->dev->caps.fw_ver;
    props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
        IB_DEVICE_PORT_ACTIVE_EVENT |
        IB_DEVICE_SYS_IMAGE_GUID |
        IB_DEVICE_RC_RNR_NAK_GEN |
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK |
        IB_DEVICE_SHARED_MR;

    if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
        props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
    if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
        props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
    if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
        props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
    if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
        props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
    if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
        props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
    if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
        props->device_cap_flags |= IB_DEVICE_UD_TSO;
    if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
        props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
    if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
        (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
        (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
    if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
        props->device_cap_flags |= IB_DEVICE_XRC;
    if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_CROSS_CHANNEL)
        props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

    if (check_flow_steering_support(dev->dev))
        props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

    props->device_cap_flags |= IB_DEVICE_QPG;
    if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
        props->device_cap_flags |= IB_DEVICE_UD_RSS;
        props->max_rss_tbl_sz = dev->dev->caps.max_rss_tbl_sz;
    }
    if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
    if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
            props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
        else
            props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
    }

    props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
        0xffffff;
    props->vendor_part_id = dev->dev->pdev->device;
    props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
    memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

    props->max_mr_size = ~0ull;
    props->page_size_cap = dev->dev->caps.page_size_cap;
    props->max_qp = dev->dev->quotas.qp;
    props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
    props->max_sge = min(dev->dev->caps.max_sq_sg,
                 dev->dev->caps.max_rq_sg);
    props->max_cq = dev->dev->quotas.cq;
    props->max_cqe = dev->dev->caps.max_cqes;
    props->max_mr = dev->dev->quotas.mpt;
    props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
    props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
    props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
    props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
    props->max_srq = dev->dev->quotas.srq;
    props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
    props->max_srq_sge = dev->dev->caps.max_srq_sge;
    props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
    props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
    props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
        IB_ATOMIC_HCA : IB_ATOMIC_NONE;
    props->masked_atomic_cap = props->atomic_cap;
    props->max_pkeys = dev->dev->caps.pkey_table_len[1];
    props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
    props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
    props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
        props->max_mcast_grp;
    props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
    props->hca_core_clock = dev->dev->caps.hca_core_clock;
    if (dev->dev->caps.hca_core_clock > 0)
        props->comp_mask |= IB_DEVICE_ATTR_WITH_HCA_CORE_CLOCK;
    if (dev->dev->caps.cq_timestamp) {
        props->timestamp_mask = 0xFFFFFFFFFFFF;
        props->comp_mask |= IB_DEVICE_ATTR_WITH_TIMESTAMP_MASK;
    }

out:
    kfree(in_mad);
    kfree(out_mad);

    return err;
}
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
    struct mlx4_dev *dev = to_mdev(device)->dev;

    return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
        IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
                  struct ib_port_attr *props, int netw_view)
{
    struct ib_smp *in_mad = NULL;
    struct ib_smp *out_mad = NULL;
    int ext_active_speed;
    int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
    int err = -ENOMEM;

    in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
    out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
    if (!in_mad || !out_mad)
        goto out;

    init_query_mad(in_mad);
    in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
    in_mad->attr_mod = cpu_to_be32(port);

    if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
        mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

    err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
               in_mad, out_mad);
    if (err)
        goto out;

    props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
    props->lmc = out_mad->data[34] & 0x7;
    props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
    props->sm_sl = out_mad->data[36] & 0xf;
    props->state = out_mad->data[32] & 0xf;
    props->phys_state = out_mad->data[33] >> 4;
    props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
    if (netw_view)
        props->gid_tbl_len = out_mad->data[50];
    else
        props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
    props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
    props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
    props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
    props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
    props->active_width = out_mad->data[31] & 0xf;
    props->active_speed = out_mad->data[35] >> 4;
    props->max_mtu = out_mad->data[41] & 0xf;
    props->active_mtu = out_mad->data[36] >> 4;
    props->subnet_timeout = out_mad->data[51] & 0x1f;
    props->max_vl_num = out_mad->data[37] >> 4;
    props->init_type_reply = out_mad->data[41] >> 4;

    /* Check if extended speeds (EDR/FDR/...) are supported */
    if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
        ext_active_speed = out_mad->data[62] >> 4;

        switch (ext_active_speed) {
        case 1:
            props->active_speed = IB_SPEED_FDR;
            break;
        case 2:
            props->active_speed = IB_SPEED_EDR;
            break;
        }
    }

    /* If the reported active speed is QDR, check if it is FDR-10 */
    if (props->active_speed == IB_SPEED_QDR) {
        init_query_mad(in_mad);
        in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
                   NULL, NULL, in_mad, out_mad);
        if (err)
            goto out;

        /* Checking LinkSpeedActive for FDR-10 */
        if (out_mad->data[15] & 0x1)
            props->active_speed = IB_SPEED_FDR10;
    }

    /* Avoid wrong speed value returned by FW if the IB link is down. */
    if (props->state == IB_PORT_DOWN)
        props->active_speed = IB_SPEED_SDR;

out:
    kfree(in_mad);
    kfree(out_mad);
    return err;
}
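/*
 * Map a logical port state onto an IB PortPhysicalState value: 5
 * (LinkUp) when active, 3 (Disabled) otherwise. Ethernet ports have no
 * SMA that could report a real physical state, so eth_link_query_port()
 * below relies on this approximation.
 */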
static u8 state_to_phys_state(enum ib_port_state state)
{
    return state == IB_PORT_ACTIVE ? 5 : 3;
}

static int eth_link_query_port(struct ib_device *ibdev, u8 port,
                   struct ib_port_attr *props, int netw_view)
{
    struct mlx4_ib_dev *mdev = to_mdev(ibdev);
    struct mlx4_ib_iboe *iboe = &mdev->iboe;
    struct net_device *ndev;
    enum ib_mtu tmp;
    struct mlx4_cmd_mailbox *mailbox;
    unsigned long flags;
    int err = 0;

    mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
    if (IS_ERR(mailbox))
        return PTR_ERR(mailbox);

    err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
               MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
               MLX4_CMD_WRAPPED);
    if (err)
        goto out;

    props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
        IB_WIDTH_4X : IB_WIDTH_1X;
    props->active_speed = IB_SPEED_QDR;
    props->port_cap_flags = IB_PORT_CM_SUP;
    if (netw_view)
        props->gid_tbl_len = MLX4_ROCE_MAX_GIDS;
    else
        props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];

    props->max_msg_sz = mdev->dev->caps.max_msg_sz;
    props->pkey_tbl_len = 1;
    props->max_mtu = IB_MTU_4096;
    props->max_vl_num = 2;
    props->state = IB_PORT_DOWN;
    props->phys_state = state_to_phys_state(props->state);
    props->active_mtu = IB_MTU_256;
    spin_lock_irqsave(&iboe->lock, flags);
    ndev = iboe->netdevs[port - 1];
    if (!ndev)
        goto out_unlock;

    tmp = iboe_get_mtu(ndev->if_mtu);
    props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

    props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
        IB_PORT_ACTIVE : IB_PORT_DOWN;
    props->phys_state = state_to_phys_state(props->state);
out_unlock:
    spin_unlock_irqrestore(&iboe->lock, flags);
out:
    mlx4_free_cmd_mailbox(mdev->dev, mailbox);
    return err;
}
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
             struct ib_port_attr *props, int netw_view)
{
    int err;

    memset(props, 0, sizeof *props);

    err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
        ib_link_query_port(ibdev, port, props, netw_view) :
        eth_link_query_port(ibdev, port, props, netw_view);

    return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                  struct ib_port_attr *props)
{
    /* returns host view */
    return __mlx4_ib_query_port(ibdev, port, props, 0);
}
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
            union ib_gid *gid, int netw_view)
{
    struct ib_smp *in_mad = NULL;
    struct ib_smp *out_mad = NULL;
    int err = -ENOMEM;
    struct mlx4_ib_dev *dev = to_mdev(ibdev);
    int clear = 0;
    int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

    in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
    out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
    if (!in_mad || !out_mad)
        goto out;

    init_query_mad(in_mad);
    in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
    in_mad->attr_mod = cpu_to_be32(port);

    if (mlx4_is_mfunc(dev->dev) && netw_view)
        mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

    err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
    if (err)
        goto out;

    memcpy(gid->raw, out_mad->data + 8, 8);

    if (mlx4_is_mfunc(dev->dev) && !netw_view) {
        if (index) {
            /* For any index > 0, return the null guid */
            err = 0;
            clear = 1;
            goto out;
        }
    }

    init_query_mad(in_mad);
    in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
    in_mad->attr_mod = cpu_to_be32(index / 8);

    err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
               NULL, NULL, in_mad, out_mad);
    if (err)
        goto out;

    memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
    if (clear)
        memset(gid->raw + 8, 0, 8);
    kfree(in_mad);
    kfree(out_mad);
    return err;
}
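/*
 * An IB GID is thus assembled from two MADs: bytes 0..7 (the subnet
 * prefix) come from PortInfo and bytes 8..15 (the GUID) from the
 * GuidInfo record covering the requested index. RoCE GIDs, by contrast,
 * are served directly from the software gid_table cache below.
 */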
static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
              union ib_gid *gid)
{
    struct mlx4_ib_dev *dev = to_mdev(ibdev);

    *gid = dev->iboe.gid_table[port - 1][index];

    return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                 union ib_gid *gid)
{
    if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
        return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

    return iboe_query_gid(ibdev, port, index, gid);
}
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
             u16 *pkey, int netw_view)
{
    struct ib_smp *in_mad = NULL;
    struct ib_smp *out_mad = NULL;
    int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
    int err = -ENOMEM;

    in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
    out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
    if (!in_mad || !out_mad)
        goto out;

    init_query_mad(in_mad);
    in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
    in_mad->attr_mod = cpu_to_be32(index / 32);

    if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
        mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

    err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
               in_mad, out_mad);
    if (err)
        goto out;

    *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
    kfree(in_mad);
    kfree(out_mad);
    return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
    return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                 struct ib_device_modify *props)
{
    struct mlx4_cmd_mailbox *mailbox;
    unsigned long flags;

    if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
        return -EOPNOTSUPP;

    if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
        return 0;

    if (mlx4_is_slave(to_mdev(ibdev)->dev))
        return -EOPNOTSUPP;

    spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
    memcpy(ibdev->node_desc, props->node_desc, 64);
    spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

    /*
     * If possible, pass the node desc to FW so it can generate
     * a trap 144 (node description changed). If the command fails,
     * just ignore it.
     */
    mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
    if (IS_ERR(mailbox))
        goto out;

    memset(mailbox->buf, 0, 256);
    memcpy(mailbox->buf, props->node_desc, 64);
    mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
         MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

    mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

out:
    return 0;
}
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
             u32 cap_mask)
{
    struct mlx4_cmd_mailbox *mailbox;
    int err;
    u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

    mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
    if (IS_ERR(mailbox))
        return PTR_ERR(mailbox);

    memset(mailbox->buf, 0, 256);

    if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
        *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
        ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
    } else {
        ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
        ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
    }

    err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
               MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

    mlx4_free_cmd_mailbox(dev->dev, mailbox);
    return err;
}
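/*
 * Note the two SET_PORT mailbox layouts above: with
 * MLX4_FLAG_OLD_PORT_CMDS the reset-qkey-violations flag sits in bit 6
 * of byte 0 and the capability mask in dword 2, while newer firmware
 * expects the flag in byte 3 and the mask in dword 1. The new
 * capability mask itself is computed as (old | set) & ~clr below.
 */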
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                   struct ib_port_modify *props)
{
    struct ib_port_attr attr;
    u32 cap_mask;
    int err;

    mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

    err = mlx4_ib_query_port(ibdev, port, &attr);
    if (err)
        goto out;

    cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
        ~props->clr_port_cap_mask;

    err = mlx4_SET_PORT(to_mdev(ibdev), port,
                !!(mask & IB_PORT_RESET_QKEY_CNTR),
                cap_mask);

out:
    mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
    return err;
}
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
                          struct ib_udata *udata)
{
    struct mlx4_ib_dev *dev = to_mdev(ibdev);
    struct mlx4_ib_ucontext *context;
    struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
    struct mlx4_ib_alloc_ucontext_resp resp;
    int err;

    if (!dev->ib_active)
        return ERR_PTR(-EAGAIN);

    if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
        resp_v3.qp_tab_size = dev->dev->caps.num_qps;
        if (mlx4_wc_enabled()) {
            resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
            resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
        } else {
            resp_v3.bf_reg_size = 0;
            resp_v3.bf_regs_per_page = 0;
        }
    } else {
        resp.dev_caps = dev->dev->caps.userspace_caps;
        resp.qp_tab_size = dev->dev->caps.num_qps;
        if (mlx4_wc_enabled()) {
            resp.bf_reg_size = dev->dev->caps.bf_reg_size;
            resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
        } else {
            resp.bf_reg_size = 0;
            resp.bf_regs_per_page = 0;
        }
        resp.cqe_size = dev->dev->caps.cqe_size;
    }

    context = kmalloc(sizeof *context, GFP_KERNEL);
    if (!context)
        return ERR_PTR(-ENOMEM);

    err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
    if (err) {
        kfree(context);
        return ERR_PTR(err);
    }

    INIT_LIST_HEAD(&context->db_page_list);
    mutex_init(&context->db_page_mutex);

    if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
        err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
    else
        err = ib_copy_to_udata(udata, &resp, sizeof(resp));

    if (err) {
        mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
        kfree(context);
        return ERR_PTR(-EFAULT);
    }

    return &context->ibucontext;
}
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
    struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

    mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
    kfree(context);

    return 0;
}
/* XXX FBSD has no support for get_unmapped_area function */
static unsigned long mlx4_ib_get_unmapped_area(struct file *file,
                           unsigned long addr,
                           unsigned long len, unsigned long pgoff,
                           unsigned long flags)
{
    struct mm_struct *mm;
    struct vm_area_struct *vma;
    unsigned long start_addr;
    unsigned long page_size_order;
    unsigned long command;

    mm = current->mm;
    if (addr)
        return current->mm->get_unmapped_area(file, addr, len,
                              pgoff, flags);

    /* The last 8 bits hold the command; the others are data per that command */
    command = pgoff & MLX4_IB_MMAP_CMD_MASK;
    if (command != MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES)
        return current->mm->get_unmapped_area(file, addr, len,
                              pgoff, flags);

    page_size_order = pgoff >> MLX4_IB_MMAP_CMD_BITS;
    /* code is based on the huge-pages get_unmapped_area code */
    start_addr = mm->free_area_cache;

    if (len <= mm->cached_hole_size)
        start_addr = TASK_UNMAPPED_BASE;

full_search:
    addr = ALIGN(start_addr, 1 << page_size_order);

    for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
        /* At this point: (!vma || addr < vma->vm_end). */
        if (TASK_SIZE - len < addr) {
            /*
             * Start a new search - just in case we missed
             * some holes.
             */
            if (start_addr != TASK_UNMAPPED_BASE) {
                start_addr = TASK_UNMAPPED_BASE;
                goto full_search;
            }
            return -ENOMEM;
        }

        if (!vma || addr + len <= vma->vm_start)
            return addr;
        addr = ALIGN(vma->vm_end, 1 << page_size_order);
    }
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
    struct mlx4_ib_dev *dev = to_mdev(context->device);

    /* The last 8 bits hold the command; the others are data per that command */
    unsigned long command = vma->vm_pgoff & MLX4_IB_MMAP_CMD_MASK;

    if (command < MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
        /* compatibility handling for commands 0 & 1 */
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
            return -EINVAL;
    }
    if (command == MLX4_IB_MMAP_UAR_PAGE) {
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                       to_mucontext(context)->uar.pfn,
                       PAGE_SIZE, vma->vm_page_prot))
            return -EAGAIN;
    } else if (command == MLX4_IB_MMAP_BLUE_FLAME_PAGE &&
           dev->dev->caps.bf_reg_size != 0) {
        vma->vm_page_prot = pgprot_wc(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                       to_mucontext(context)->uar.pfn +
                       dev->dev->caps.num_uars,
                       PAGE_SIZE, vma->vm_page_prot))
            return -EAGAIN;
    } else if (command == MLX4_IB_MMAP_GET_HW_CLOCK) {
        struct mlx4_clock_params params;
        int ret;

        ret = mlx4_get_internal_clock_params(dev->dev, &params);
        if (ret)
            return ret;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                       (pci_resource_start(dev->dev->pdev,
                                   params.bar) + params.offset)
                       >> PAGE_SHIFT,
                       PAGE_SIZE, vma->vm_page_prot))
            return -EAGAIN;
    } else
        return -EINVAL;

    return 0;
}
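/*
 * The mmap offset encodes a command: the low MLX4_IB_MMAP_CMD_BITS bits
 * of vm_pgoff select the UAR page (0), the Blue Flame page (1) or the
 * internal HW clock, and the remaining bits carry per-command data.
 * A userspace consumer would map the clock page roughly as follows
 * (sketch only; error handling omitted):
 *
 *   void *clk = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED,
 *                    verbs_cmd_fd, MLX4_IB_MMAP_GET_HW_CLOCK * PAGE_SIZE);
 */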
static int mlx4_ib_ioctl(struct ib_ucontext *context, unsigned int cmd,
             unsigned long arg)
{
    struct mlx4_ib_dev *dev = to_mdev(context->device);
    loff_t offset;
    int ret;

    switch (cmd) {
    case MLX4_IOCHWCLOCKOFFSET: {
        struct mlx4_clock_params params;

        ret = mlx4_get_internal_clock_params(dev->dev, &params);
        if (!ret) {
            offset = params.offset % PAGE_SIZE;
            ret = put_user(offset,
                       (loff_t __user *) arg);
        }
        return ret;
    }
    default:
        pr_err("mlx4_ib: invalid ioctl %u command with arg %lX\n",
               cmd, arg);
        return -EINVAL;
    }
}
static int mlx4_ib_query_values(struct ib_device *device, int q_values,
                struct ib_device_values *values)
{
    struct mlx4_ib_dev *dev = to_mdev(device);
    cycle_t cycles;

    values->values_mask = 0;
    if (q_values & IBV_VALUES_HW_CLOCK) {
        cycles = mlx4_read_clock(dev->dev);
        values->hwclock = cycles & CORE_CLOCK_MASK;
        values->values_mask |= IBV_VALUES_HW_CLOCK;
        q_values &= ~IBV_VALUES_HW_CLOCK;
    }

    return 0;
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
                      struct ib_ucontext *context,
                      struct ib_udata *udata)
{
    struct mlx4_ib_pd *pd;
    int err;

    pd = kmalloc(sizeof *pd, GFP_KERNEL);
    if (!pd)
        return ERR_PTR(-ENOMEM);

    err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
    if (err) {
        kfree(pd);
        return ERR_PTR(err);
    }

    if (context)
        if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
            mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
            kfree(pd);
            return ERR_PTR(-EFAULT);
        }

    return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
    mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
    kfree(pd);

    return 0;
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
                      struct ib_ucontext *context,
                      struct ib_udata *udata)
{
    struct mlx4_ib_xrcd *xrcd;
    int err;

    if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
        return ERR_PTR(-ENOSYS);

    xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
    if (!xrcd)
        return ERR_PTR(-ENOMEM);

    err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
    if (err)
        goto err1;

    xrcd->pd = ib_alloc_pd(ibdev);
    if (IS_ERR(xrcd->pd)) {
        err = PTR_ERR(xrcd->pd);
        goto err2;
    }

    xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
    if (IS_ERR(xrcd->cq)) {
        err = PTR_ERR(xrcd->cq);
        goto err3;
    }

    return &xrcd->ibxrcd;

err3:
    ib_dealloc_pd(xrcd->pd);
err2:
    mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
    kfree(xrcd);
    return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
    ib_destroy_cq(to_mxrcd(xrcd)->cq);
    ib_dealloc_pd(to_mxrcd(xrcd)->pd);
    mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
    kfree(xrcd);

    return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
    struct mlx4_ib_qp *mqp = to_mqp(ibqp);
    struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
    struct mlx4_ib_gid_entry *ge;

    ge = kzalloc(sizeof *ge, GFP_KERNEL);
    if (!ge)
        return -ENOMEM;

    ge->gid = *gid;
    if (mlx4_ib_add_mc(mdev, mqp, gid)) {
        ge->port = mqp->port;
        ge->added = 1;
    }

    mutex_lock(&mqp->mutex);
    list_add_tail(&ge->list, &mqp->gid_list);
    mutex_unlock(&mqp->mutex);

    return 0;
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
           union ib_gid *gid)
{
    struct net_device *ndev;
    u8 mac[6];
    int ret = 0;

    if (!mqp->port)
        return 0;

    spin_lock(&mdev->iboe.lock);
    ndev = mdev->iboe.netdevs[mqp->port - 1];
    if (ndev)
        dev_hold(ndev);
    spin_unlock(&mdev->iboe.lock);

    if (ndev) {
        rdma_get_mcast_mac((struct in6_addr *)gid, mac);
        ret = 1;
        dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac, 6, 0);
        dev_put(ndev);
    }

    return ret;
}
struct mlx4_ib_steering {
    struct list_head list;
    u64 reg_id;
    union ib_gid gid;
};
static int parse_flow_attr(struct mlx4_dev *dev,
               union ib_flow_spec *ib_spec,
               struct _rule_hw *mlx4_spec)
{
    enum mlx4_net_trans_rule_id type;

    switch (ib_spec->type) {
    case IB_FLOW_SPEC_ETH:
        type = MLX4_NET_TRANS_RULE_ID_ETH;
        memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
               ETH_ALEN);
        memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
               ETH_ALEN);
        mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
        mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
        break;

    case IB_FLOW_SPEC_IB:
        type = MLX4_NET_TRANS_RULE_ID_IB;
        mlx4_spec->ib.l3_qpn = ib_spec->ib.val.l3_type_qpn;
        mlx4_spec->ib.qpn_mask = ib_spec->ib.mask.l3_type_qpn;
        memcpy(&mlx4_spec->ib.dst_gid, ib_spec->ib.val.dst_gid, 16);
        memcpy(&mlx4_spec->ib.dst_gid_msk,
               ib_spec->ib.mask.dst_gid, 16);
        break;

    case IB_FLOW_SPEC_IPV4:
        type = MLX4_NET_TRANS_RULE_ID_IPV4;
        mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
        mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
        mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
        mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
        break;

    case IB_FLOW_SPEC_TCP:
    case IB_FLOW_SPEC_UDP:
        type = ib_spec->type == IB_FLOW_SPEC_TCP ?
            MLX4_NET_TRANS_RULE_ID_TCP :
            MLX4_NET_TRANS_RULE_ID_UDP;
        mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
        mlx4_spec->tcp_udp.dst_port_msk =
            ib_spec->tcp_udp.mask.dst_port;
        mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
        mlx4_spec->tcp_udp.src_port_msk =
            ib_spec->tcp_udp.mask.src_port;
        break;

    default:
        return -EINVAL;
    }

    if (map_sw_to_hw_steering_id(dev, type) < 0 ||
        hw_rule_sz(dev, type) < 0)
        return -EINVAL;
    mlx4_spec->id = cpu_to_be16(map_sw_to_hw_steering_id(dev, type));
    mlx4_spec->size = hw_rule_sz(dev, type) >> 2;
    return hw_rule_sz(dev, type);
}
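/*
 * parse_flow_attr() returns the hardware rule size in bytes. Inside the
 * _rule_hw header the id is the firmware steering id for the protocol
 * and size is stored in 4-byte units, hence the >> 2 above.
 */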
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
                 int domain,
                 enum mlx4_net_trans_promisc_mode flow_type,
                 u64 *reg_id)
{
    int ret, i;
    int size = 0;
    void *ib_flow;
    struct mlx4_ib_dev *mdev = to_mdev(qp->device);
    struct mlx4_cmd_mailbox *mailbox;
    struct mlx4_net_trans_rule_hw_ctrl *ctrl;
    size_t rule_size = sizeof(struct mlx4_net_trans_rule_hw_ctrl) +
               (sizeof(struct _rule_hw) * flow_attr->num_of_specs);

    static const u16 __mlx4_domain[] = {
        [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
        [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
        [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
        [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
    };

    if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
        pr_err("Invalid priority value.\n");
        return -EINVAL;
    }
    if (domain >= IB_FLOW_DOMAIN_NUM) {
        pr_err("Invalid domain value.\n");
        return -EINVAL;
    }
    if (map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
        return -EINVAL;

    mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
    if (IS_ERR(mailbox))
        return PTR_ERR(mailbox);
    memset(mailbox->buf, 0, rule_size);
    ctrl = mailbox->buf;

    ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
                 flow_attr->priority);
    ctrl->type = map_sw_to_hw_steering_mode(mdev->dev, flow_type);
    ctrl->port = flow_attr->port;
    ctrl->qpn = cpu_to_be32(qp->qp_num);

    if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK)
        ctrl->flags = (1 << 3);

    ib_flow = flow_attr + 1;
    size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
    for (i = 0; i < flow_attr->num_of_specs; i++) {
        ret = parse_flow_attr(mdev->dev, ib_flow, mailbox->buf + size);
        if (ret < 0) {
            mlx4_free_cmd_mailbox(mdev->dev, mailbox);
            return -EINVAL;
        }
        ib_flow += ((union ib_flow_spec *) ib_flow)->size;
        size += ret;
    }

    ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
               MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
               MLX4_CMD_NATIVE);
    if (ret == -ENOMEM)
        pr_err("mcg table is full. Failed to register network rule.\n");
    else if (ret == -ENXIO)
        pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
    else if (ret)
        pr_err("Invalid argument. Failed to register network rule.\n");
    mlx4_free_cmd_mailbox(mdev->dev, mailbox);
    return ret;
}
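/*
 * The attach command ships a single mailbox: a
 * mlx4_net_trans_rule_hw_ctrl header (priority/domain, rule type, port
 * and destination QPN) followed by the packed per-protocol specs built
 * by parse_flow_attr(). On success the firmware returns a 64-bit
 * registration id through *reg_id; that id is the only token
 * __mlx4_ib_destroy_flow() needs to detach the rule again.
 */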
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
    int err;

    err = mlx4_cmd(dev, reg_id, 0, 0,
               MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
               MLX4_CMD_NATIVE);
    if (err)
        pr_err("Failed to detach network rule. registration id = 0x%llx\n",
               (unsigned long long)reg_id);
    return err;
}
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                       struct ib_flow_attr *flow_attr,
                       int domain)
{
    int err = 0, i = 0;
    struct mlx4_ib_flow *mflow;
    enum mlx4_net_trans_promisc_mode type[2];

    memset(type, 0, sizeof(type));

    mflow = kzalloc(sizeof(struct mlx4_ib_flow), GFP_KERNEL);
    if (!mflow) {
        err = -ENOMEM;
        goto err_free;
    }

    switch (flow_attr->type) {
    case IB_FLOW_ATTR_NORMAL:
        type[0] = MLX4_FS_REGULAR;
        break;

    case IB_FLOW_ATTR_ALL_DEFAULT:
        type[0] = MLX4_FS_ALL_DEFAULT;
        break;

    case IB_FLOW_ATTR_MC_DEFAULT:
        type[0] = MLX4_FS_MC_DEFAULT;
        break;

    case IB_FLOW_ATTR_SNIFFER:
        type[0] = MLX4_FS_UC_SNIFFER;
        type[1] = MLX4_FS_MC_SNIFFER;
        break;

    default:
        err = -EINVAL;
        goto err_free;
    }

    while (i < ARRAY_SIZE(type) && type[i]) {
        err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
                        &mflow->reg_id[i]);
        if (err)
            goto err_free;
        i++;
    }

    return &mflow->ibflow;

err_free:
    kfree(mflow);
    return ERR_PTR(err);
}
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
    int err, ret = 0;
    int i = 0;
    struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
    struct mlx4_ib_flow *mflow = to_mflow(flow_id);

    while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
        err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
        if (err)
            ret = err;
        i++;
    }

    kfree(mflow);
    return ret;
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
    struct mlx4_ib_gid_entry *ge;
    struct mlx4_ib_gid_entry *tmp;
    struct mlx4_ib_gid_entry *ret = NULL;

    list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
        if (!memcmp(raw, ge->gid.raw, 16)) {
            ret = ge;
            break;
        }
    }

    return ret;
}
static int del_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
    struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
    struct mlx4_ib_qp *mqp = to_mqp(ibqp);
    struct mlx4_ib_gid_entry *ge;
    struct net_device *ndev;
    u8 mac[6];

    mutex_lock(&mqp->mutex);
    ge = find_gid_entry(mqp, gid->raw);
    if (ge) {
        spin_lock(&mdev->iboe.lock);
        ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
        if (ndev)
            dev_hold(ndev);
        spin_unlock(&mdev->iboe.lock);
        rdma_get_mcast_mac((struct in6_addr *)gid, mac);
        if (ndev) {
            dev_mc_delete(mdev->iboe.netdevs[ge->port - 1], mac, 6, 0);
            dev_put(ndev);
        }
        list_del(&ge->list);
        kfree(ge);
    } else
        pr_warn("could not find mgid entry\n");

    mutex_unlock(&mqp->mutex);
    return ge != NULL ? 0 : -EINVAL;
}
static int _mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid,
                   int count)
{
    int err;
    struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
    struct mlx4_ib_qp *mqp = to_mqp(ibqp);
    u64 reg_id = 0;
    int record_err = 0;

    if (mdev->dev->caps.steering_mode ==
        MLX4_STEERING_MODE_DEVICE_MANAGED) {
        struct mlx4_ib_steering *ib_steering;
        struct mlx4_ib_steering *tmp;
        LIST_HEAD(temp);

        mutex_lock(&mqp->mutex);
        list_for_each_entry_safe(ib_steering, tmp, &mqp->steering_rules,
                     list) {
            if (memcmp(ib_steering->gid.raw, gid->raw, 16))
                continue;

            if (--count < 0)
                break;

            list_del(&ib_steering->list);
            list_add(&ib_steering->list, &temp);
        }
        mutex_unlock(&mqp->mutex);
        list_for_each_entry_safe(ib_steering, tmp, &temp,
                     list) {
            reg_id = ib_steering->reg_id;

            err = mlx4_multicast_detach(mdev->dev, &mqp->mqp,
                  gid->raw,
                  (ibqp->qp_type == IB_QPT_RAW_PACKET) ?
                  MLX4_PROT_ETH : MLX4_PROT_IB_IPV6,
                  reg_id);
            if (err) {
                record_err = record_err ?: err;
                continue;
            }

            err = del_gid_entry(ibqp, gid);
            if (err) {
                record_err = record_err ?: err;
                continue;
            }

            list_del(&ib_steering->list);
            kfree(ib_steering);
        }
        mutex_lock(&mqp->mutex);
        list_for_each_entry(ib_steering, &temp, list) {
            list_add(&ib_steering->list, &mqp->steering_rules);
        }
        mutex_unlock(&mqp->mutex);
        if (record_err)
            pr_warn("Couldn't release all reg_ids for mgid. Steering rules are left attached\n");

        return record_err;
    }

    if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_B0 &&
        ibqp->qp_type == IB_QPT_RAW_PACKET)
        gid->raw[5] = mqp->port;

    err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                    (ibqp->qp_type == IB_QPT_RAW_PACKET) ?
                    MLX4_PROT_ETH : MLX4_PROT_IB_IPV6,
                    reg_id);
    if (err)
        return err;

    err = del_gid_entry(ibqp, gid);
    return err;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
    struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
    int count = (mdev->dev->caps.steering_mode ==
             MLX4_STEERING_MODE_DEVICE_MANAGED) ?
            mdev->dev->caps.num_ports : 1;

    return _mlx4_ib_mcg_detach(ibqp, gid, lid, count);
}
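/*
 * In device-managed steering mode mlx4_ib_mcg_attach() installs one
 * rule per port (it fills the whole port bitmap), so detach is asked to
 * undo up to caps.num_ports registrations; in every other mode a single
 * registration exists and count is 1.
 */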
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
    int err;
    struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
    struct mlx4_ib_qp *mqp = to_mqp(ibqp);
    DECLARE_BITMAP(ports, MLX4_MAX_PORTS);
    int i = 0;

    if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_B0 &&
        ibqp->qp_type == IB_QPT_RAW_PACKET)
        gid->raw[5] = mqp->port;

    if (mdev->dev->caps.steering_mode ==
        MLX4_STEERING_MODE_DEVICE_MANAGED) {
        bitmap_fill(ports, mdev->dev->caps.num_ports);
    } else {
        if (mqp->port <= mdev->dev->caps.num_ports) {
            bitmap_zero(ports, mdev->dev->caps.num_ports);
            set_bit(mqp->port - 1, ports);
        } else {
            return -EINVAL;
        }
    }

    for (; i < mdev->dev->caps.num_ports; i++) {
        u64 reg_id;
        struct mlx4_ib_steering *ib_steering = NULL;
        if (!test_bit(i, ports))
            continue;
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
            ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
            if (!ib_steering)
                goto err_add;
        }

        err = mlx4_multicast_attach(mdev->dev, &mqp->mqp,
                        gid->raw, i + 1,
                        !!(mqp->flags &
                           MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
                        (ibqp->qp_type == IB_QPT_RAW_PACKET) ?
                        MLX4_PROT_ETH : MLX4_PROT_IB_IPV6,
                        &reg_id);
        if (err) {
            kfree(ib_steering);
            goto err_add;
        }

        err = add_gid_entry(ibqp, gid);
        if (err) {
            mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                          MLX4_PROT_IB_IPV6, reg_id);
            goto err_add;
        }

        if (ib_steering) {
            memcpy(ib_steering->gid.raw, gid->raw, 16);
            mutex_lock(&mqp->mutex);
            list_add(&ib_steering->list, &mqp->steering_rules);
            mutex_unlock(&mqp->mutex);
            ib_steering->reg_id = reg_id;
        }
    }

    return 0;

err_add:
    if (i > 0)
        _mlx4_ib_mcg_detach(ibqp, gid, lid, i);

    return err;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
    struct ib_smp *in_mad = NULL;
    struct ib_smp *out_mad = NULL;
    int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
    int err = -ENOMEM;

    in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
    out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
    if (!in_mad || !out_mad)
        goto out;

    init_query_mad(in_mad);
    in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
    if (mlx4_is_master(dev->dev))
        mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

    err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
    if (err)
        goto out;

    memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

    in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

    err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
    if (err)
        goto out;

    dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
    memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
    kfree(in_mad);
    kfree(out_mad);
    return err;
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
            char *buf)
{
    struct mlx4_ib_dev *dev =
        container_of(device, struct mlx4_ib_dev, ib_dev.dev);
    return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
               char *buf)
{
    struct mlx4_ib_dev *dev =
        container_of(device, struct mlx4_ib_dev, ib_dev.dev);
    return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
               (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
               (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
            char *buf)
{
    struct mlx4_ib_dev *dev =
        container_of(device, struct mlx4_ib_dev, ib_dev.dev);
    return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
              char *buf)
{
    struct mlx4_ib_dev *dev =
        container_of(device, struct mlx4_ib_dev, ib_dev.dev);
    return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
               dev->dev->board_id);
}

static ssize_t show_vsd(struct device *device, struct device_attribute *attr,
            char *buf)
{
    struct mlx4_ib_dev *dev =
        container_of(device, struct mlx4_ib_dev, ib_dev.dev);
    ssize_t len = MLX4_VSD_LEN;

    if (dev->dev->vsd_vendor_id == PCI_VENDOR_ID_MELLANOX)
        len = sprintf(buf, "%.*s\n", MLX4_VSD_LEN, dev->dev->vsd);
    else
        memcpy(buf, dev->dev->vsd, MLX4_VSD_LEN);

    return len;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static DEVICE_ATTR(vsd, S_IRUGO, show_vsd, NULL);

static struct device_attribute *mlx4_class_attributes[] = {
    &dev_attr_hw_rev,
    &dev_attr_fw_ver,
    &dev_attr_hca_type,
    &dev_attr_board_id,
    &dev_attr_vsd,
};
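/*
 * Derive the 8-byte interface id of a link-local GID from a 6-byte MAC:
 * the first three octets land in eui[0..2] and the last three in
 * eui[5..7]; a VLAN id below 0x1000 is embedded in eui[3..4], untagged
 * interfaces get the standard ff:fe filler, and the universal/local bit
 * is flipped. E.g. MAC 00:25:90:aa:bb:cc on VLAN 5 yields the interface
 * id 02:25:90:00:05:aa:bb:cc.
 */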
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev, u8 port)
{
    memcpy(eui, IF_LLADDR(dev), 3);
    memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);
    if (vlan_id < 0x1000) {
        eui[3] = vlan_id >> 8;
        eui[4] = vlan_id & 0xff;
    } else {
        eui[3] = 0xff;
        eui[4] = 0xfe;
    }
    eui[0] ^= 2;
}
static void update_gids_task(struct work_struct *work)
{
    struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
    struct mlx4_cmd_mailbox *mailbox;
    union ib_gid *gids;
    int err;
    struct mlx4_dev *dev = gw->dev->dev;

    mailbox = mlx4_alloc_cmd_mailbox(dev);
    if (IS_ERR(mailbox)) {
        pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
        goto free;
    }

    gids = mailbox->buf;
    memcpy(gids, gw->gids, sizeof gw->gids);

    if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
        IB_LINK_LAYER_ETHERNET) {
        err = mlx4_cmd(dev, mailbox->dma,
                   MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
                   1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                   MLX4_CMD_WRAPPED);
        if (err)
            pr_warn("set port command failed\n");
        else
            mlx4_ib_dispatch_event(gw->dev, gw->port,
                           IB_EVENT_GID_CHANGE);
    }

    mlx4_free_cmd_mailbox(dev, mailbox);
free:
    kfree(gw);
}
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
    struct mlx4_ib_dev *ibdev = to_mdev(device);

    return mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
}
static void reset_gids_task(struct work_struct *work)
{
    struct update_gid_work *gw =
        container_of(work, struct update_gid_work, work);
    struct mlx4_cmd_mailbox *mailbox;
    union ib_gid *gids;
    int err;
    struct mlx4_dev *dev = gw->dev->dev;

    mailbox = mlx4_alloc_cmd_mailbox(dev);
    if (IS_ERR(mailbox)) {
        pr_warn("reset gid table failed\n");
        goto free;
    }

    gids = mailbox->buf;
    memcpy(gids, gw->gids, sizeof(gw->gids));

    if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, 1) ==
        IB_LINK_LAYER_ETHERNET &&
        dev->caps.num_ports > 0) {
        err = mlx4_cmd(dev, mailbox->dma,
                   MLX4_SET_PORT_GID_TABLE << 8 | 1,
                   1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                   MLX4_CMD_WRAPPED);
        if (err)
            pr_warn("set port 1 command failed\n");
    }

    if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, 2) ==
        IB_LINK_LAYER_ETHERNET &&
        dev->caps.num_ports > 1) {
        err = mlx4_cmd(dev, mailbox->dma,
                   MLX4_SET_PORT_GID_TABLE << 8 | 2,
                   1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                   MLX4_CMD_WRAPPED);
        if (err)
            pr_warn("set port 2 command failed\n");
    }

    mlx4_free_cmd_mailbox(dev, mailbox);
free:
    kfree(gw);
}
static int update_gid_table(struct mlx4_ib_dev *dev, int port,
                union ib_gid *gid, int clear, int default_gid)
{
    struct update_gid_work *work;
    int i;
    int need_update = 0;
    int free = -1;
    int found = -1;
    int max_gids;
    int start_index = !default_gid;

    max_gids = dev->dev->caps.gid_table_len[port];
    for (i = start_index; i < max_gids; ++i) {
        if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
                sizeof(*gid))) {
            found = i;
            if (clear) {
                need_update = 1;
                dev->iboe.gid_table[port - 1][found] = zgid;
            }
            break;
        }
        if (free < 0 &&
            !memcmp(&dev->iboe.gid_table[port - 1][i],
                &zgid, sizeof(*gid)))
            free = i;
    }

    if (found == -1 && !clear && free < 0) {
        pr_err("GID table of port %d is full. Can't add "GID_PRINT_FMT"\n",
               port, GID_PRINT_ARGS(gid));
        return -ENOMEM;
    }
    if (found == -1 && clear) {
        pr_err(GID_PRINT_FMT" is not in GID table of port %d\n", GID_PRINT_ARGS(gid), port);
        return -EINVAL;
    }
    if (found == -1 && !clear && free >= 0) {
        dev->iboe.gid_table[port - 1][free] = *gid;
        need_update = 1;
    }

    if (!need_update)
        return 0;

    work = kzalloc(sizeof *work, GFP_ATOMIC);
    if (!work)
        return -ENOMEM;

    memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
    INIT_WORK(&work->work, update_gids_task);
    work->port = port;
    work->dev = dev;
    queue_work(wq, &work->work);

    return 0;
}

static int reset_gid_table(struct mlx4_ib_dev *dev)
{
    struct update_gid_work *work;

    work = kzalloc(sizeof(*work), GFP_ATOMIC);
    if (!work)
        return -ENOMEM;

    memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
    memset(work->gids, 0, sizeof(work->gids));
    INIT_WORK(&work->work, reset_gids_task);
    work->dev = dev;
    queue_work(wq, &work->work);

    return 0;
}
/* XXX BOND Related - stub (no support for these flags in FBSD) */
static inline int netif_is_bond_master(struct net_device *dev)
{
#if 0
    return (dev->flags & IFF_MASTER) && (dev->priv_flags & IFF_BONDING);
#endif
    return 0;
}

static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid, u8 port)
{
    gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
    mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev, port);
}
static u8 mlx4_ib_get_dev_port(struct net_device *dev, struct mlx4_ib_dev *ibdev)
{
    u8 port;
    struct mlx4_ib_iboe *iboe;
    struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
        rdma_vlan_dev_real_dev(dev) : dev;

    iboe = &ibdev->iboe;

    for (port = 1; port <= MLX4_MAX_PORTS; ++port)
        if ((netif_is_bond_master(real_dev) && (real_dev == iboe->masters[port - 1])) ||
            (!netif_is_bond_master(real_dev) && (real_dev == iboe->netdevs[port - 1])))
            break;

    return port > MLX4_MAX_PORTS ? 0 : port;
}
static void mlx4_ib_get_dev_addr(struct net_device *dev, struct mlx4_ib_dev *ibdev, u8 port)
{
    struct ifaddr *ifa;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    struct inet6_dev *in6_dev;
    union ib_gid *pgid;
    struct inet6_ifaddr *ifp;
#endif
    union ib_gid gid;

    if ((port == 0) || (port > MLX4_MAX_PORTS))
        return;

    /* IPv4 gids */
    TAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) {
        if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
            ipv6_addr_set_v4mapped(
                ((struct sockaddr_in *) ifa->ifa_addr)->sin_addr.s_addr,
                (struct in6_addr *)&gid);
            update_gid_table(ibdev, port, &gid, 0, 0);
        }
    }

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    /* IPv6 gids */
    in6_dev = in6_dev_get(dev);
    if (in6_dev) {
        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
            pgid = (union ib_gid *)&ifp->addr;
            update_gid_table(ibdev, port, pgid, 0, 0);
        }
        read_unlock_bh(&in6_dev->lock);
        in6_dev_put(in6_dev);
    }
#endif
}
static void mlx4_set_default_gid(struct mlx4_ib_dev *ibdev,
                 struct net_device *dev, u8 port)
{
    union ib_gid gid;

    mlx4_make_default_gid(dev, &gid, port);
    update_gid_table(ibdev, port, &gid, 0, 1);
}

static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
{
    struct net_device *dev;

    if (reset_gid_table(ibdev))
        return -1;

    IFNET_RLOCK_NOSLEEP();
    TAILQ_FOREACH(dev, &V_ifnet, if_link) {
        u8 port = mlx4_ib_get_dev_port(dev, ibdev);
        if (port) {
            if (!rdma_vlan_dev_real_dev(dev) &&
                !netif_is_bond_master(dev))
                mlx4_set_default_gid(ibdev, dev, port);
            mlx4_ib_get_dev_addr(dev, ibdev, port);
        }
    }
    IFNET_RUNLOCK_NOSLEEP();

    return 0;
}
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                 struct net_device *dev, unsigned long event)
{
    struct mlx4_ib_iboe *iboe;
    int port;
    int update = 0;
    unsigned long flags;

    iboe = &ibdev->iboe;

    spin_lock_irqsave(&iboe->lock, flags);
    mlx4_foreach_ib_transport_port(port, ibdev->dev) {
        struct net_device *old_netdev = iboe->netdevs[port - 1];
        /* XXX BOND related */
        struct net_device *old_master = iboe->masters[port - 1];

        iboe->masters[port - 1] = NULL;
        iboe->netdevs[port - 1] =
            mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);

        if (old_netdev != iboe->netdevs[port - 1])
            update = 1;
        if (dev == iboe->netdevs[port - 1] &&
            event == NETDEV_CHANGEADDR)
            update = 1;

        /* XXX BOND related */
        if (iboe->netdevs[port - 1] && netif_is_bond_slave(iboe->netdevs[port - 1]))
            iboe->masters[port - 1] = iboe->netdevs[port - 1]->master;

        /* If bonding is used, the net_device may be added to masters
         * only after an IP address has been assigned to the bonding
         * interface. */
        if (old_master != iboe->masters[port - 1])
            update = 1;
    }
    spin_unlock_irqrestore(&iboe->lock, flags);

    if (update)
        if (mlx4_ib_init_gid_table(ibdev))
            pr_warn("Failed to reset gid table\n");
}
static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
                void *ptr)
{
    struct net_device *dev = ptr;
    struct mlx4_ib_dev *ibdev;

    ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);

    mlx4_ib_scan_netdevs(ibdev, dev, event);

    return NOTIFY_DONE;
}
/* This function initializes the GID table only if the event_netdev's real
 * device is an iboe device; it is invoked by the inet/inet6 events. */
static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
                  void *ptr)
{
    struct net_device *event_netdev = ptr;
    struct mlx4_ib_dev *ibdev;
    struct mlx4_ib_iboe *ibdev_iboe;
    u8 port = 0;

    ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);

    struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
                      rdma_vlan_dev_real_dev(event_netdev) :
                      event_netdev;

    ibdev_iboe = &ibdev->iboe;

    port = mlx4_ib_get_dev_port(real_dev, ibdev);

    /* Initialize the GID table only if the event's real_dev is the
     * net_device that represents this port; otherwise the event is
     * unrelated and is ignored. */
    if (port && (real_dev == ibdev_iboe->netdevs[port - 1]))
        if (mlx4_ib_init_gid_table(ibdev))
            pr_warn("Failed to reset gid table\n");

    return NOTIFY_DONE;
}
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
    int port;
    int slave;
    int i;

    if (mlx4_is_master(ibdev->dev)) {
        for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
            for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
                for (i = 0;
                     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
                     ++i) {
                    ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
                        /* master has the identity virt2phys pkey mapping */
                        (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
                            ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
                    mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
                                 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
                }
            }
        }
        /* initialize pkey cache */
        for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
            for (i = 0;
                 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
                 ++i)
                ibdev->pkeys.phys_pkey_cache[port-1][i] =
                    (i) ? 0 : 0xFFFF;
        }
    }
}
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
    int i, j, eq = 0, total_eqs = 0;
    int eq_per_port = 0;
    int added_eqs = 0;
    char name[32];

    /* Legacy mode or comp_pool is not large enough */
    if (dev->caps.comp_pool == 0 ||
        dev->caps.num_ports > dev->caps.comp_pool)
        return;

    eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
                       dev->caps.num_ports);

    mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
        added_eqs += eq_per_port;

    total_eqs = dev->caps.num_comp_vectors + added_eqs;

    ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
    if (!ibdev->eq_table)
        return;

    ibdev->eq_added = added_eqs;

    mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
        for (j = 0; j < eq_per_port; j++) {
            sprintf(name, "mlx4-ib-%d-%d@%d:%d:%d:%d", i, j,
                pci_get_domain(dev->pdev->dev.bsddev),
                pci_get_bus(dev->pdev->dev.bsddev),
                PCI_SLOT(dev->pdev->devfn),
                PCI_FUNC(dev->pdev->devfn));

            /* Set IRQ for specific name (per ring) */
            if (mlx4_assign_eq(dev, name,
                       &ibdev->eq_table[eq])) {
                /* Use legacy (same as mlx4_en driver) */
                pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
                ibdev->eq_table[eq] =
                    (eq % dev->caps.num_comp_vectors);
            }
            eq++;
        }
    }

    /* Fill the rest of the vector with legacy EQs */
    for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
        ibdev->eq_table[eq++] = i;

    /* Advertise the new number of EQs to clients */
    ibdev->ib_dev.num_comp_vectors = total_eqs;
}
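/*
 * EQ distribution sketch: with a completion-vector pool of size P and N
 * IB ports, each port gets rounddown_pow_of_two(P / N) dedicated EQs,
 * appended after the legacy vectors. E.g. P = 24, N = 2 yields 8 extra
 * EQs per port, so num_comp_vectors + 16 vectors are advertised.
 */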
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
    int i;

    /* no additional eqs were added */
    if (!ibdev->eq_table)
        return;

    /* Reset the advertised EQ number */
    ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;

    /* Free only the added eqs */
    for (i = 0; i < ibdev->eq_added; i++) {
        /* Don't free legacy eqs if used */
        if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
            continue;
        mlx4_release_eq(dev, ibdev->eq_table[i]);
    }

    kfree(ibdev->eq_table);
    ibdev->eq_table = NULL;
}
/*
 * create show function and a device_attribute struct pointing to
 * the function for _name
 */
#define DEVICE_DIAG_RPRT_ATTR(_name, _offset, _op_mod)		\
static ssize_t show_rprt_##_name(struct device *dev,		\
                 struct device_attribute *attr,			\
                 char *buf){					\
    return show_diag_rprt(dev, buf, _offset, _op_mod);	\
}								\
static DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);

#define MLX4_DIAG_RPRT_CLEAR_DIAGS 3

static size_t show_diag_rprt(struct device *device, char *buf,
                 u32 offset, u8 op_modifier)
{
    ssize_t ret;
    u32 counter_offset = offset;
    u32 diag_counter = 0;
    struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
                           ib_dev.dev);

    ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
                       &counter_offset, &diag_counter);
    if (ret)
        return ret;

    return sprintf(buf, "%d\n", diag_counter);
}

static ssize_t clear_diag_counters(struct device *device,
                   struct device_attribute *attr,
                   const char *buf, size_t length)
{
    ssize_t ret;
    struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
                           ib_dev.dev);

    ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
                       NULL, NULL);
    if (ret)
        return ret;

    return length;
}
DEVICE_DIAG_RPRT_ATTR(rq_num_lle	, 0x00, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_lle	, 0x04, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_lqpoe	, 0x08, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_lqpoe	, 0x0C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_lpe	, 0x18, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_lpe	, 0x1C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_wrfe	, 0x20, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_wrfe	, 0x24, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_mwbe	, 0x2C, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_bre	, 0x34, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_lae	, 0x38, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rire	, 0x44, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rire	, 0x48, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rae	, 0x4C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rae	, 0x50, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_roe	, 0x54, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_tree	, 0x5C, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rree	, 0x64, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rnr	, 0x68, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rnr	, 0x6C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_oos	, 0x100, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_oos	, 0x104, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_mce	, 0x108, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_udsdprd	, 0x118, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_ucsdprd	, 0x120, 2);
DEVICE_DIAG_RPRT_ATTR(num_cqovf		, 0x1A0, 2);
DEVICE_DIAG_RPRT_ATTR(num_eqovf		, 0x1A4, 2);
DEVICE_DIAG_RPRT_ATTR(num_baddb		, 0x1A8, 2);

static DEVICE_ATTR(clear_diag, S_IWUSR, NULL, clear_diag_counters);
static struct attribute *diag_rprt_attrs[] = {
    &dev_attr_rq_num_lle.attr,
    &dev_attr_sq_num_lle.attr,
    &dev_attr_rq_num_lqpoe.attr,
    &dev_attr_sq_num_lqpoe.attr,
    &dev_attr_rq_num_lpe.attr,
    &dev_attr_sq_num_lpe.attr,
    &dev_attr_rq_num_wrfe.attr,
    &dev_attr_sq_num_wrfe.attr,
    &dev_attr_sq_num_mwbe.attr,
    &dev_attr_sq_num_bre.attr,
    &dev_attr_rq_num_lae.attr,
    &dev_attr_sq_num_rire.attr,
    &dev_attr_rq_num_rire.attr,
    &dev_attr_sq_num_rae.attr,
    &dev_attr_rq_num_rae.attr,
    &dev_attr_sq_num_roe.attr,
    &dev_attr_sq_num_tree.attr,
    &dev_attr_sq_num_rree.attr,
    &dev_attr_rq_num_rnr.attr,
    &dev_attr_sq_num_rnr.attr,
    &dev_attr_rq_num_oos.attr,
    &dev_attr_sq_num_oos.attr,
    &dev_attr_rq_num_mce.attr,
    &dev_attr_rq_num_udsdprd.attr,
    &dev_attr_rq_num_ucsdprd.attr,
    &dev_attr_num_cqovf.attr,
    &dev_attr_num_eqovf.attr,
    &dev_attr_num_baddb.attr,
    &dev_attr_clear_diag.attr,
    NULL
};

static struct attribute_group diag_counters_group = {
    .name  = "diag_counters",
    .attrs = diag_rprt_attrs
};
static void init_dev_assign(void)
{
    int i = 0;

    spin_lock_init(&dev_num_str_lock);
    if (mlx4_fill_dbdf2val_tbl(&dev_assign_str))
        return;
    dev_num_str_bitmap =
        kmalloc(BITS_TO_LONGS(MAX_NUM_STR_BITMAP) * sizeof(long),
            GFP_KERNEL);
    if (!dev_num_str_bitmap) {
        pr_warn("bitmap alloc failed -- cannot apply dev_assign_str parameter\n");
        return;
    }
    bitmap_zero(dev_num_str_bitmap, MAX_NUM_STR_BITMAP);
    while ((i < MLX4_DEVS_TBL_SIZE) && (dev_assign_str.tbl[i].dbdf !=
           MLX4_ENDOF_TBL)) {
        if (bitmap_allocate_region(dev_num_str_bitmap,
                       dev_assign_str.tbl[i].val[0], 0))
            goto err;
        i++;
    }

    return;

err:
    kfree(dev_num_str_bitmap);
    dev_num_str_bitmap = NULL;
    pr_warn("mlx4_ib: The value of 'dev_assign_str' parameter "
        "is incorrect. The parameter value is discarded!");
}
static int mlx4_ib_dev_idx(struct mlx4_dev *dev)
{
    int i, val;

    if (!dev_num_str_bitmap)
        return -1;

    if (mlx4_get_val(dev_assign_str.tbl, dev->pdev, 0, &val))
        return -1;

    if (val != DEFAULT_TBL_VAL) {
        dev->flags |= MLX4_FLAG_DEV_NUM_STR;
        return val;
    }

    spin_lock(&dev_num_str_lock);
    i = bitmap_find_free_region(dev_num_str_bitmap, MAX_NUM_STR_BITMAP, 0);
    spin_unlock(&dev_num_str_lock);
    if (i >= 0)
        return i;

    return -1;
}
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
    struct mlx4_ib_dev *ibdev;
    int num_ports = 0;
    int i;
    int err;
    struct mlx4_ib_iboe *iboe;
    int dev_idx;

    pr_info_once("%s", mlx4_ib_version);

    mlx4_foreach_ib_transport_port(i, dev)
        num_ports++;

    /* No point in registering a device with no ports... */
    if (num_ports == 0)
        return NULL;

    ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
    if (!ibdev) {
        dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
        return NULL;
    }

    iboe = &ibdev->iboe;

    if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
        goto err_dealloc;

    if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
        goto err_pd;

    ibdev->priv_uar.map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT,
                      PAGE_SIZE);
    if (!ibdev->priv_uar.map)
        goto err_uar;

    MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

    dev_idx = mlx4_ib_dev_idx(dev);
    if (dev_idx >= 0)
        sprintf(ibdev->ib_dev.name, "mlx4_%d", dev_idx);
    else
        strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);

    ibdev->ib_dev.owner = THIS_MODULE;
    ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
    ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
    ibdev->num_ports = num_ports;
    ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
    ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
    ibdev->ib_dev.dma_device = &dev->pdev->dev;

    if (dev->caps.userspace_caps)
        ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
    else
        ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
    ibdev->ib_dev.uverbs_cmd_mask =
        (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
        (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
        (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
        (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
        (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
        (1ull << IB_USER_VERBS_CMD_REG_MR) |
        (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
        (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
        (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
        (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
        (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
        (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
        (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
        (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
        (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
        (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
        (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
        (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
        (1ull << IB_USER_VERBS_CMD_OPEN_QP);
	ibdev->ib_dev.query_device = mlx4_ib_query_device;
	ibdev->ib_dev.query_port = mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap = mlx4_ib_mmap;
	/* XXX FBSD has no support for get_unmapped_area function */
#if 0
	ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
#endif
	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send = mlx4_ib_post_send;
	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
	ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
	ibdev->ib_dev.ioctl = mlx4_ib_ioctl;
	ibdev->ib_dev.query_values = mlx4_ib_query_values;
	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW) {
		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
		ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;

		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}
	/*
	 * Set experimental data
	 */
	ibdev->ib_dev.uverbs_exp_cmd_mask =
		(1ull << IB_USER_VERBS_EXP_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_EXP_CMD_MODIFY_CQ) |
		(1ull << IB_USER_VERBS_EXP_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EXP_CMD_CREATE_CQ);
	ibdev->ib_dev.exp_create_qp = mlx4_ib_exp_create_qp;
	ibdev->ib_dev.exp_query_device = mlx4_ib_exp_query_device;

	if (check_flow_steering_support(dev)) {
		ibdev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
		ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
		ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
	} else {
		pr_debug("Device managed flow steering is unavailable for this configuration.\n");
	}
	/*
	 * End of experimental data
	 */
	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	for (i = 0; i < ibdev->num_ports; ++i) {
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
		    IB_LINK_LAYER_ETHERNET) {
			if (mlx4_is_slave(dev)) {
				ibdev->counters[i].status = mlx4_counter_alloc(ibdev->dev,
									       i + 1,
									       &ibdev->counters[i].counter_index);
			} else { /* allocating the PF IB default counter indices reserved in mlx4_init_counters_table */
				ibdev->counters[i].counter_index = ((i + 1) << 1) - 1;
				ibdev->counters[i].status = 0;
			}

			dev_info(&dev->pdev->dev,
				 "%s: allocated counter index %d for port %d\n",
				 __func__, ibdev->counters[i].counter_index, i + 1);
		} else {
			ibdev->counters[i].counter_index = MLX4_SINK_COUNTER_INDEX;
			ibdev->counters[i].status = -ENOSPC;
		}
	}

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    !mlx4_is_mfunc(dev)) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long), GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->pdev->dev, "bit map alloc failed\n");
			goto err_steer_qp_release;
		}

		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);

		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(dev, ibdev->steer_qpn_base,
				ibdev->steer_qpn_base + ibdev->steer_qpn_count - 1);
		if (err)
			goto err_steer_free_bitmap;
	}

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_steer_free_bitmap;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
		if (!iboe->nb.notifier_call) {
			iboe->nb.notifier_call = mlx4_ib_netdev_event;
			err = register_netdevice_notifier(&iboe->nb);
			if (err) {
				iboe->nb.notifier_call = NULL;
				goto err_notify;
			}
		}
		if (!iboe->nb_inet.notifier_call) {
			iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
			err = register_inetaddr_notifier(&iboe->nb_inet);
			if (err) {
				iboe->nb_inet.notifier_call = NULL;
				goto err_notify;
			}
		}
		mlx4_ib_scan_netdevs(ibdev, NULL, 0);
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notify;
	}
	if (sysfs_create_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group))
		goto err_notify;

	ibdev->ib_active = true;
	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}

	return ibdev;
err_notify:
	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		device_remove_file(&ibdev->ib_dev.dev,
				   mlx4_class_attributes[j]);
	}

	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);

err_counter:
	for (i = 1; i <= ibdev->num_ports; ++i) {
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i) ==
		    IB_LINK_LAYER_ETHERNET) {
			mlx4_counter_free(ibdev->dev, i,
					  ibdev->counters[i - 1].counter_index);
		}
	}

err_map:
	iounmap(ibdev->priv_uar.map);
	mlx4_ib_free_eqs(dev, ibdev);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
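/*
 * Unwind sketch (illustrative): the error labels above release resources in
 * strict reverse order of acquisition, so every failure site jumps to the
 * label that frees exactly what has been set up so far and nothing more.
 * Schematically:
 *
 *	a = acquire_a();
 *	if (!a)
 *		return NULL;
 *	b = acquire_b();
 *	if (!b)
 *		goto err_a;
 *	c = acquire_c();
 *	if (!c)
 *		goto err_b;
 *	return ok;
 *
 *	err_b:
 *		release_b(b);
 *	err_a:
 *		release_a(a);
 *		return NULL;
 */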
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base, get_count_order(count));
}
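/*
 * Pairing sketch (illustrative, hypothetical caller): the allocator rounds
 * count up to a power of two via get_count_order(), so the free side must
 * pass the same count to release the identical region:
 *
 *	int qpn;
 *	int err;
 *
 *	err = mlx4_ib_steer_qp_alloc(ibdev, 4, &qpn);	// order-2 region
 *	if (err)
 *		return err;
 *	... use qpn .. qpn + 3 ...
 *	mlx4_ib_steer_qp_free(ibdev, qpn, 4);		// same count
 */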
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		ib_spec->val.l3_type_qpn = mqp->ibqp.qp_num;
		ib_spec->mask.l3_type_qpn = MLX4_IB_FLOW_QPN_MASK;

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
					    IB_FLOW_DOMAIN_NIC,
					    MLX4_FS_REGULAR, &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}
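/*
 * Layout note (illustrative): __mlx4_ib_create_flow() consumes the attr
 * header and its specs as one contiguous buffer, which is why flow_size is a
 * single kzalloc and the spec pointer is derived as (flow + 1):
 *
 *	+-------------------------+  <- flow
 *	| struct ib_flow_attr     |
 *	+-------------------------+  <- ib_spec = (struct ib_flow_spec_ib *)(flow + 1)
 *	| struct ib_flow_spec_ib  |
 *	+-------------------------+  <- (char *)flow + flow->size
 */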
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p, j;
	int dev_idx, ret;

	if (ibdev->iboe.nb_inet.notifier_call) {
		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb_inet.notifier_call = NULL;
	}

	mlx4_ib_close_sriov(ibdev);
	sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);
	mlx4_ib_mad_cleanup(ibdev);

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		device_remove_file(&ibdev->ib_dev.dev,
				   mlx4_class_attributes[j]);
	}

	dev_idx = -1;
	if (dr_active && !(ibdev->dev->flags & MLX4_FLAG_DEV_NUM_STR)) {
		ret = sscanf(ibdev->ib_dev.name, "mlx4_%d", &dev_idx);
		if (ret != 1)
			dev_idx = -1;
	}
	ib_unregister_device(&ibdev->ib_dev);
	if (dev_idx >= 0) {
		spin_lock(&dev_num_str_lock);
		bitmap_release_region(dev_num_str_bitmap, dev_idx, 0);
		spin_unlock(&dev_num_str_lock);
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->priv_uar.map);

	for (p = 0; p < ibdev->num_ports; ++p) {
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, p + 1) ==
		    IB_LINK_LAYER_ETHERNET) {
			mlx4_counter_free(ibdev->dev, p + 1,
					  ibdev->counters[p].counter_index);
		}
	}

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;

	if (!mlx4_is_master(dev))
		return;

	dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		goto out;
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			for (i = 0; i < dev->caps.num_ports; i++) {
				if (dm[i])
					kfree(dm[i]);
			}
			goto out;
		}
	}
	/* initialize or tear down tunnel QPs for the slave */
	for (i = 0; i < dev->caps.num_ports; i++) {
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
		if (!ibdev->sriov.is_going_down)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	}
out:
	kfree(dm);
	return;
}
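/*
 * Worker sketch (illustrative; the real handler lives elsewhere in the
 * driver): each work item owns its own allocation, so the handler is
 * expected to derive the per-port context with container_of() and free it
 * when done, along the lines of:
 *
 *	static void mlx4_ib_tunnels_update_work(struct work_struct *work)
 *	{
 *		struct mlx4_ib_demux_work *dmxw;
 *
 *		dmxw = container_of(work, struct mlx4_ib_demux_work, work);
 *		... set up or tear down tunnel QPs for dmxw->port and
 *		    dmxw->slave according to dmxw->do_init ...
 *		kfree(dmxw);
 *	}
 */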
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
		    IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		mlx4_ib_info((struct ib_device *) ibdev_ptr,
			     "Port %d logical link is up\n", p);
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		mlx4_ib_info((struct ib_device *) ibdev_ptr,
			     "Port %d logical link is down\n", p);
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device = ibdev_ptr;
	ibev.element.port_num = (u8) p;

	ib_dispatch_event(&ibev);
}
static struct mlx4_interface mlx4_ib_interface = {
	.add = mlx4_ib_add,
	.remove = mlx4_ib_remove,
	.event = mlx4_ib_event,
	.protocol = MLX4_PROT_IB_IPV6
};
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	init_dev_assign();

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);

	kfree(dev_num_str_bitmap);
}

module_init_order(mlx4_ib_init, SI_ORDER_MIDDLE);
module_exit(mlx4_ib_cleanup);
static int
mlx4ib_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t mlx4ib_mod = {
	.name = "mlx4ib",
	.evhand = mlx4ib_evhand,
};

DECLARE_MODULE(mlx4ib, mlx4ib_mod, SI_SUB_LAST, SI_ORDER_ANY);
MODULE_DEPEND(mlx4ib, mlx4, 1, 1, 1);
MODULE_DEPEND(mlx4ib, ibcore, 1, 1, 1);
MODULE_DEPEND(mlx4ib, linuxkpi, 1, 1, 1);
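/*
 * Loading note: on FreeBSD the driver is loaded with kldload(8) as mlx4ib;
 * the MODULE_DEPEND() lines above let the kernel linker pull in the mlx4
 * core, ibcore and linuxkpi modules first:
 *
 *	# kldload mlx4ib
 */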