/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/if_ether.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/sched.h>
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"

#define MLX4_IB_DRIVER_PROC_DIR_NAME	"driver/mlx4_ib"
#define MLX4_IB_MRS_PROC_DIR_NAME	"mrs"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
int mlx4_ib_sm_guid_assign = 1;

struct proc_dir_entry *mlx4_mrs_dir_entry;
static struct proc_dir_entry *mlx4_ib_driver_dir_entry;

module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
static char dev_assign_str[512];
//module_param_string(dev_assign_str, dev_assign_str, sizeof(dev_assign_str), 0644);
MODULE_PARM_DESC(dev_assign_str, "Map all device function numbers to "
		 "IB device numbers following the pattern: "
		 "bb:dd.f-0,bb:dd.f-1,... (all numbers are hexadecimal)."
		 " Max supported devices - 32");
static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
struct update_gid_work {
	struct work_struct	work;
	union ib_gid		gids[128];
	struct mlx4_ib_dev     *dev;
	int			port;
};
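/*
 * Bookkeeping for the "dev_assign_str" module parameter: each entry
 * pins one PCI function (bus/device/function) to a fixed mlx4_ib
 * device index.  Parsed by init_dev_assign() below.
 */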
struct dev_rec {
	int	bus;
	int	dev;
	int	func;
	int	nr;
};

#define MAX_DR 32
static struct dev_rec dr[MAX_DR];

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static union ib_gid zgid;
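/*
 * Device attribute queries are answered from a mix of firmware data
 * (fetched with a NodeInfo MAD through mlx4_MAD_IFC()) and the cached
 * mlx4_core capability and quota structures.
 */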
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK |
		IB_DEVICE_SHARED_MR;

	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;

	props->device_cap_flags |= IB_DEVICE_QPG;
	if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
		props->device_cap_flags |= IB_DEVICE_UD_RSS;
		props->max_rss_tbl_sz = dev->dev->caps.max_rss_tbl_sz;
	}
	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
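/*
 * Each ConnectX port runs either as InfiniBand or as Ethernet (RoCE);
 * the per-port link layer decides which query path is taken below.
 */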
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
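/*
 * IB ports: port attributes come from a PortInfo MAD.  The extended
 * speed handling below distinguishes FDR/EDR and FDR-10 links, which
 * the base PortInfo encoding cannot express.
 */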
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id  = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}
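/*
 * Ethernet (RoCE) ports: there is no subnet manager, so port
 * attributes are synthesized from a QUERY_PORT firmware command and
 * the running/carrier state of the companion net device.
 */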
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props, int netw_view)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
		IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP;
	if (netw_view)
		props->gid_tbl_len = MLX4_ROCE_MAX_GIDS;
	else
		props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->if_mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props, netw_view);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}
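/*
 * GID index 0 holds the port GUID.  For multi-function (SR-IOV)
 * devices in host view, only index 0 is meaningful; any higher index
 * is reported as the null GID.
 */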
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

	return iboe_query_gid(ibdev, port, index, gid);
}
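/*
 * P_Key queries fetch one 32-entry block of the partition table per
 * MAD (attr_mod selects the block, index % 32 selects the entry).
 */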
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
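/*
 * SET_PORT carries the port capability mask and the reset flag for the
 * qkey-violation counter; the mailbox layout differs between the old
 * and new firmware command interfaces (MLX4_FLAG_OLD_PORT_CMDS).
 */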
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
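/*
 * Two user-context response layouts are supported, selected by the
 * uverbs ABI version the consumer negotiated: the legacy v3 layout
 * without device caps, and the current layout that also reports the
 * userspace caps and the CQE size.
 */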
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		if (mlx4_wc_enabled()) {
			resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
			resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		} else {
			resp_v3.bf_reg_size = 0;
			resp_v3.bf_regs_per_page = 0;
		}
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		if (mlx4_wc_enabled()) {
			resp.bf_reg_size = dev->dev->caps.bf_reg_size;
			resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		} else {
			resp.bf_reg_size = 0;
			resp.bf_regs_per_page = 0;
		}
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
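/*
 * mmap offsets encode a command in the low MLX4_IB_MMAP_CMD_BITS bits
 * of the page offset; for MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES the
 * remaining bits carry the requested page-size order, so the area
 * search below must return an address aligned to that order.
 */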
static unsigned long mlx4_ib_get_unmapped_area(struct file *file,
			unsigned long addr,
			unsigned long len, unsigned long pgoff,
			unsigned long flags)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long page_size_order;
	unsigned long command;

	mm = current->mm;
	if (addr)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Last 8 bits hold the command others are data per that command */
	command = pgoff & MLX4_IB_MMAP_CMD_MASK;
	if (command != MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	page_size_order = pgoff >> MLX4_IB_MMAP_CMD_BITS;
	/* code is based on the huge-pages get_unmapped_area code */
	start_addr = mm->free_area_cache;

	if (len <= mm->cached_hole_size)
		start_addr = TASK_UNMAPPED_BASE;

full_search:
	addr = ALIGN(start_addr, 1 << page_size_order);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}

		if (!vma || addr + len <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, 1 << page_size_order);
	}
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);
	int err;

	/* Last 8 bits hold the command others are data per that command */
	unsigned long command = vma->vm_pgoff & MLX4_IB_MMAP_CMD_MASK;

	if (command < MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
		/* compatibility handling for commands 0 & 1 */
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;
	}
	if (command == MLX4_IB_MMAP_UAR_PAGE) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (command == MLX4_IB_MMAP_BLUE_FLAME_PAGE &&
		   dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_wc(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (command == MLX4_IB_MMAP_GET_CONTIGUOUS_PAGES) {
		/* Getting contiguous physical pages */
		unsigned long total_size = vma->vm_end - vma->vm_start;
		unsigned long page_size_order = (vma->vm_pgoff) >>
						MLX4_IB_MMAP_CMD_BITS;
		struct ib_cmem *ib_cmem;

		ib_cmem = ib_cmem_alloc_contiguous_pages(context, total_size,
							 page_size_order);
		if (IS_ERR(ib_cmem)) {
			err = PTR_ERR(ib_cmem);
			return err;
		}

		err = ib_cmem_map_contiguous_pages_to_vma(ib_cmem, vma);
		if (err) {
			ib_cmem_release_contiguous_pages(ib_cmem);
			return err;
		}
		return 0;
	} else
		return -EINVAL;

	return 0;
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}
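/*
 * For RoCE multicast, attaching a QP to an MGID also programs the
 * matching multicast MAC into the companion net device; the gid_list
 * entries below remember what must be undone at detach time.
 */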
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	u8 mac[6];
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		rtnl_lock();
		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac, 6, 0);
		ret = 1;
		rtnl_unlock();
		dev_put(ndev);
	}

	return ret;
}
struct mlx4_ib_steering {
	struct list_head list;
	u64 reg_id;
	union ib_gid gid;
};
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u64 reg_id;
	struct mlx4_ib_steering *ib_steering = NULL;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    MLX4_PROT_IB_IPV6, &reg_id);
	if (err)
		goto err_malloc;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      MLX4_PROT_IB_IPV6, reg_id);
err_malloc:
	kfree(ib_steering);

	return err;
}
enum {
	IBV_FLOW_L4_NONE = 0,
	IBV_FLOW_L4_OTHER = 3,
	IBV_FLOW_L4_UDP = 5,
	IBV_FLOW_L4_TCP = 6
};

struct mlx4_cm_steering {
	struct list_head list;
	u64 reg_id;
	struct ib_flow_spec spec;
};
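/*
 * Translate a verbs ib_flow_spec into the list of mlx4_spec_list
 * entries (L2, optional L3 and L4 headers with their masks) that
 * mlx4_flow_attach() expects.
 */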
static int flow_spec_to_net_rule(struct ib_device *dev, struct ib_flow_spec *flow_spec,
				 struct list_head *rule_list_h)
{
	struct mlx4_spec_list *spec_l2, *spec_l3, *spec_l4;
	u64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
	if (!spec_l2)
		return -ENOMEM;

	switch (flow_spec->type) {
	case IB_FLOW_ETH:
		spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_l2->eth.dst_mac, flow_spec->l2_id.eth.mac, ETH_ALEN);
		memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
		spec_l2->eth.ether_type = flow_spec->l2_id.eth.ethertype;
		if (flow_spec->l2_id.eth.vlan_present) {
			spec_l2->eth.vlan_id = flow_spec->l2_id.eth.vlan;
			spec_l2->eth.vlan_id_msk = cpu_to_be16(0x0fff);
		}
		break;
	case IB_FLOW_IB_UC:
		spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
		if (flow_spec->l2_id.ib_uc.qpn) {
			spec_l2->ib.l3_qpn = cpu_to_be32(flow_spec->l2_id.ib_uc.qpn);
			spec_l2->ib.qpn_msk = cpu_to_be32(0xffffff);
		}
		break;
	case IB_FLOW_IB_MC_IPV4:
	case IB_FLOW_IB_MC_IPV6:
		spec_l2->id = MLX4_NET_TRANS_RULE_ID_IB;
		memcpy(spec_l2->ib.dst_gid, flow_spec->l2_id.ib_mc.mgid, 16);
		memset(spec_l2->ib.dst_gid_msk, 0xff, 16);
		break;
	}

	list_add_tail(&spec_l2->list, rule_list_h);

	if (flow_spec->l2_id.eth.ethertype == cpu_to_be16(ETH_P_IP) ||
	    flow_spec->type != IB_FLOW_ETH) {
		spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
		if (!spec_l3)
			return -ENOMEM;

		spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
		spec_l3->ipv4.src_ip = flow_spec->src_ip;
		if (flow_spec->type != IB_FLOW_IB_MC_IPV4 &&
		    flow_spec->type != IB_FLOW_IB_MC_IPV6)
			spec_l3->ipv4.dst_ip = flow_spec->dst_ip;

		if (spec_l3->ipv4.src_ip)
			spec_l3->ipv4.src_ip_msk = MLX4_BE_WORD_MASK;
		if (spec_l3->ipv4.dst_ip)
			spec_l3->ipv4.dst_ip_msk = MLX4_BE_WORD_MASK;

		list_add_tail(&spec_l3->list, rule_list_h);
	}

	if (flow_spec->l4_protocol) {
		spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
		if (!spec_l4)
			return -ENOMEM;

		spec_l4->tcp_udp.src_port = flow_spec->src_port;
		spec_l4->tcp_udp.dst_port = flow_spec->dst_port;
		if (spec_l4->tcp_udp.src_port)
			spec_l4->tcp_udp.src_port_msk =
					MLX4_BE_SHORT_MASK;
		if (spec_l4->tcp_udp.dst_port)
			spec_l4->tcp_udp.dst_port_msk =
					MLX4_BE_SHORT_MASK;

		switch (flow_spec->l4_protocol) {
		case IBV_FLOW_L4_UDP:
			spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
			break;
		case IBV_FLOW_L4_TCP:
			spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
			break;
		default:
			dev_err(dev->dma_device,
				"Unsupported l4 protocol.\n");
			kfree(spec_l4);
			return -EPROTONOSUPPORT;
		}
		list_add_tail(&spec_l4->list, rule_list_h);
	}

	return 0;
}
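/*
 * Attach/detach of user flow-steering rules.  Each attached rule is
 * remembered on the QP's rules_list together with the firmware
 * registration id, so the matching detach can find and remove it.
 */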
static int __mlx4_ib_flow_attach(struct mlx4_ib_dev *mdev,
				 struct mlx4_ib_qp *mqp,
				 struct ib_flow_spec *flow_spec,
				 int priority, int lock_qp)
{
	u64 reg_id = 0;
	int err = 0;
	struct mlx4_cm_steering *cm_flow;
	struct mlx4_spec_list *spec, *tmp_spec;

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
	};

	rule.promisc_mode = flow_spec->rule_type;
	rule.port = mqp->port;
	rule.qpn = mqp->mqp.qpn;
	INIT_LIST_HEAD(&rule.list);

	cm_flow = kmalloc(sizeof(*cm_flow), GFP_KERNEL);
	if (!cm_flow)
		return -ENOMEM;

	if (rule.promisc_mode == MLX4_FS_REGULAR) {
		rule.allow_loopback = !flow_spec->block_mc_loopback;
		rule.priority = MLX4_DOMAIN_UVERBS | priority;
		err = flow_spec_to_net_rule(&mdev->ib_dev, flow_spec,
					    &rule.list);
		if (err)
			goto free_list;
	}

	err = mlx4_flow_attach(mdev->dev, &rule, &reg_id);
	if (err)
		goto free_list;

	memcpy(&cm_flow->spec, flow_spec, sizeof(*flow_spec));
	cm_flow->reg_id = reg_id;

	if (lock_qp)
		mutex_lock(&mqp->mutex);
	list_add(&cm_flow->list, &mqp->rules_list);
	if (lock_qp)
		mutex_unlock(&mqp->mutex);

free_list:
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	if (err) {
		kfree(cm_flow);
		dev_err(mdev->ib_dev.dma_device,
			"Failed to attach flow steering rule\n");
	}
	return err;
}
static int __mlx4_ib_flow_detach(struct mlx4_ib_dev *mdev,
				 struct mlx4_ib_qp *mqp,
				 struct ib_flow_spec *spec, int priority,
				 int lock_qp)
{
	struct mlx4_cm_steering *cm_flow;
	int ret;

	if (lock_qp)
		mutex_lock(&mqp->mutex);
	list_for_each_entry(cm_flow, &mqp->rules_list, list) {
		if (!memcmp(&cm_flow->spec, spec, sizeof(*spec))) {
			list_del(&cm_flow->list);
			break;
		}
	}
	if (lock_qp)
		mutex_unlock(&mqp->mutex);

	if (&cm_flow->list == &mqp->rules_list) {
		dev_err(mdev->ib_dev.dma_device, "Couldn't find reg_id for flow spec. "
			"Steering rule is left attached\n");
		return -EINVAL;
	}

	ret = mlx4_flow_detach(mdev->dev, cm_flow->reg_id);
	kfree(cm_flow);

	return ret;
}
static int mlx4_ib_flow_attach(struct ib_qp *qp, struct ib_flow_spec *flow_spec,
			       int priority)
{
	return __mlx4_ib_flow_attach(to_mdev(qp->device), to_mqp(qp),
				     flow_spec, priority, 1);
}

static int mlx4_ib_flow_detach(struct ib_qp *qp, struct ib_flow_spec *spec,
			       int priority)
{
	return __mlx4_ib_flow_detach(to_mdev(qp->device), to_mqp(qp),
				     spec, priority, 1);
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u8 mac[6];
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	u64 reg_id = 0;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    MLX4_PROT_IB_IPV6, reg_id);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		if (ndev) {
			rtnl_lock();
			dev_mc_delete(mdev->iboe.netdevs[ge->port - 1], mac, 6, 0);
			rtnl_unlock();
			dev_put(ndev);
		}
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
{
#ifdef __linux__
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
#else
	memcpy(eui, IF_LLADDR(dev), 3);
	memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);
#endif
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}
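/*
 * GID-table updates are pushed to firmware from the workqueue: the
 * update_gid_work item carries a snapshot of the table, which is
 * written with SET_PORT (MLX4_SET_PORT_GID_TABLE) and only committed
 * to the cached iboe.gid_table when the command succeeds.
 */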
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev *dev = gw->dev->dev;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
		goto out;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (err)
		pr_warn("set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
out:
	kfree(gw);
}
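/*
 * Rebuild the RoCE GID table for a port from the link-local addresses
 * of the companion net device and its VLAN devices, then queue a
 * firmware update if anything changed; stale entries are zeroed.
 */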
static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
	struct net_device *ndev = dev->iboe.netdevs[port - 1];
	struct update_gid_work *work;
	struct net_device *tmp;
	int i;
	u8 *hits;
	union ib_gid gid;
	int index_free;
	int found;
	int need_update = 0;
	int max_gids;
	u16 vid;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	hits = kzalloc(128, GFP_ATOMIC);
	if (!hits) {
		kfree(work);
		return -ENOMEM;
	}

	max_gids = dev->dev->caps.gid_table_len[port];

#ifdef __linux__
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, tmp) {
#else
	IFNET_RLOCK();
	TAILQ_FOREACH(tmp, &V_ifnet, if_link) {
#endif
		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
			vid = rdma_vlan_dev_vlan_id(tmp);
			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
			found = 0;
			index_free = -1;
			for (i = 0; i < max_gids; ++i) {
				if (index_free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
					index_free = i;
				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
					hits[i] = 1;
					found = 1;
					break;
				}
			}

			if (!found) {
				if (tmp == ndev &&
				    (memcmp(&dev->iboe.gid_table[port - 1][0],
					    &gid, sizeof gid) ||
				     !memcmp(&dev->iboe.gid_table[port - 1][0],
					     &zgid, sizeof gid))) {
					dev->iboe.gid_table[port - 1][0] = gid;
					++need_update;
					hits[0] = 1;
				} else if (index_free >= 0) {
					dev->iboe.gid_table[port - 1][index_free] = gid;
					hits[index_free] = 1;
					++need_update;
				}
			}
		}
	}
#ifdef __linux__
	rcu_read_unlock();
#else
	IFNET_RUNLOCK();
#endif

	for (i = 0; i < max_gids; ++i)
		if (!hits[i]) {
			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
				++need_update;
			dev->iboe.gid_table[port - 1][i] = zgid;
		}

	if (need_update) {
		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
		INIT_WORK(&work->work, update_gids_task);
		work->port = port;
		work->dev = dev;
		queue_work(wq, &work->work);
	} else
		kfree(work);

	kfree(hits);
	return 0;
}
static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		update_ipv6_gids(dev, port, 0);
		break;

	case NETDEV_DOWN:
		update_ipv6_gids(dev, port, 1);
		dev->iboe.netdevs[port - 1] = NULL;
	}
}

static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 0);
}

static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 1);
}
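/*
 * Netdevice notifier: tracks which net device backs each RoCE port
 * and refreshes the GID table on UP/DOWN/CHANGEADDR events.
 */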
static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct net_device *dev = ptr;
	struct mlx4_ib_dev *ibdev;
	struct net_device *oldnd;
	struct mlx4_ib_iboe *iboe;
	int port;

#ifdef __linux__
	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;
#endif

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		oldnd = iboe->netdevs[port - 1];
		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (oldnd != iboe->netdevs[port - 1]) {
			if (iboe->netdevs[port - 1])
				netdev_added(ibdev, port);
			else
				netdev_removed(ibdev, port);
		}
	}

	if (dev == iboe->netdevs[0] ||
	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
		handle_en_event(ibdev, 1, event);
	else if (dev == iboe->netdevs[1]
		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
		handle_en_event(ibdev, 2, event);

	spin_unlock(&iboe->lock);

	return NOTIFY_DONE;
}
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;

	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				ibdev->pkeys.phys_pkey_cache[port-1][i] =
					(i) ? 0 : 0xFFFF;
		}
	}
}
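/*
 * When the EQ completion pool is large enough, allocate a dedicated
 * set of EQs per IB port (named after the PCI location) and fall back
 * to the legacy shared completion vectors otherwise.
 */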
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i, j, eq = 0, total_eqs = 0;
	int eq_per_port = 0;
	int added_eqs = 0;
	char name[32];

	/* Legacy mode or comp_pool is not large enough */
	if (dev->caps.comp_pool == 0 ||
	    dev->caps.num_ports > dev->caps.comp_pool)
		return;

	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
					   dev->caps.num_ports);

	/* Init eq table */
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		added_eqs += eq_per_port;

	total_eqs = dev->caps.num_comp_vectors + added_eqs;

	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	ibdev->eq_added = added_eqs;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
		for (j = 0; j < eq_per_port; j++) {
			snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%d:%d:%d:%d", i, j,
				 pci_get_domain(dev->pdev->dev.bsddev),
				 pci_get_bus(dev->pdev->dev.bsddev),
				 PCI_SLOT(dev->pdev->devfn),
				 PCI_FUNC(dev->pdev->devfn));

			/* Set IRQ for specific name (per ring) */
			if (mlx4_assign_eq(dev, name,
					   &ibdev->eq_table[eq])) {
				/* Use legacy (same as mlx4_en driver) */
				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
				ibdev->eq_table[eq] =
					(eq % dev->caps.num_comp_vectors);
			}
			eq++;
		}
	}

	/* Fill the rest of the vector with legacy EQ */
	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
		ibdev->eq_table[eq++] = i;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = total_eqs;
}
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;

	/* no additional eqs were added */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;

	/* Free only the added eqs */
	for (i = 0; i < ibdev->eq_added; i++) {
		/* Don't free legacy eqs if used */
		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
			continue;
		mlx4_release_eq(dev, ibdev->eq_table[i]);
	}

	kfree(ibdev->eq_table);
}
/*
 * create show function and a device_attribute struct pointing to
 * the function for _name
 */
#define DEVICE_DIAG_RPRT_ATTR(_name, _offset, _op_mod)		\
static ssize_t show_rprt_##_name(struct device *dev,		\
				 struct device_attribute *attr,	\
				 char *buf)			\
{								\
	return show_diag_rprt(dev, buf, _offset, _op_mod);	\
}								\
static DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);

#define MLX4_DIAG_RPRT_CLEAR_DIAGS 3
static ssize_t show_diag_rprt(struct device *device, char *buf,
			      u32 offset, u8 op_modifier)
{
	int ret;
	u32 counter_offset = offset;
	u32 diag_counter = 0;
	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
					       ib_dev.dev);

	ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
				       &counter_offset, &diag_counter);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", diag_counter);
}

static ssize_t clear_diag_counters(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t length)
{
	int ret;
	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
					       ib_dev.dev);

	ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
				       NULL, NULL);
	if (ret)
		return ret;

	return length;
}
DEVICE_DIAG_RPRT_ATTR(rq_num_lle	, 0x00, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_lle	, 0x04, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_lqpoe	, 0x08, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_lqpoe	, 0x0C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_lpe	, 0x18, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_lpe	, 0x1C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_wrfe	, 0x20, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_wrfe	, 0x24, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_mwbe	, 0x2C, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_bre	, 0x34, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_lae	, 0x38, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rire	, 0x44, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rire	, 0x48, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rae	, 0x4C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rae	, 0x50, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_roe	, 0x54, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_tree	, 0x5C, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rree	, 0x64, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rnr	, 0x68, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rnr	, 0x6C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_oos	, 0x100, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_oos	, 0x104, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_mce	, 0x108, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_udsdprd	, 0x118, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_ucsdprd	, 0x120, 2);
DEVICE_DIAG_RPRT_ATTR(num_cqovf		, 0x1A0, 2);
DEVICE_DIAG_RPRT_ATTR(num_eqovf		, 0x1A4, 2);
DEVICE_DIAG_RPRT_ATTR(num_baddb		, 0x1A8, 2);

static DEVICE_ATTR(clear_diag, S_IWUSR, NULL, clear_diag_counters);
static struct attribute *diag_rprt_attrs[] = {
	&dev_attr_rq_num_lle.attr,
	&dev_attr_sq_num_lle.attr,
	&dev_attr_rq_num_lqpoe.attr,
	&dev_attr_sq_num_lqpoe.attr,
	&dev_attr_rq_num_lpe.attr,
	&dev_attr_sq_num_lpe.attr,
	&dev_attr_rq_num_wrfe.attr,
	&dev_attr_sq_num_wrfe.attr,
	&dev_attr_sq_num_mwbe.attr,
	&dev_attr_sq_num_bre.attr,
	&dev_attr_rq_num_lae.attr,
	&dev_attr_sq_num_rire.attr,
	&dev_attr_rq_num_rire.attr,
	&dev_attr_sq_num_rae.attr,
	&dev_attr_rq_num_rae.attr,
	&dev_attr_sq_num_roe.attr,
	&dev_attr_sq_num_tree.attr,
	&dev_attr_sq_num_rree.attr,
	&dev_attr_rq_num_rnr.attr,
	&dev_attr_sq_num_rnr.attr,
	&dev_attr_rq_num_oos.attr,
	&dev_attr_sq_num_oos.attr,
	&dev_attr_rq_num_mce.attr,
	&dev_attr_rq_num_udsdprd.attr,
	&dev_attr_rq_num_ucsdprd.attr,
	&dev_attr_num_cqovf.attr,
	&dev_attr_num_eqovf.attr,
	&dev_attr_num_baddb.attr,
	&dev_attr_clear_diag.attr,
	NULL
};

static struct attribute_group diag_counters_group = {
	.name  = "diag_counters",
	.attrs = diag_rprt_attrs
};
static int mlx4_ib_proc_init(void)
{
	/* Creating procfs directories /proc/drivers/mlx4_ib/ &&
	   /proc/drivers/mlx4_ib/mrs for further use by the driver.
	*/
	int err;

	mlx4_ib_driver_dir_entry = proc_mkdir(MLX4_IB_DRIVER_PROC_DIR_NAME,
					      NULL);
	if (!mlx4_ib_driver_dir_entry) {
		pr_err("mlx4_ib_proc_init has failed for %s\n",
		       MLX4_IB_DRIVER_PROC_DIR_NAME);
		err = -ENODEV;
		goto error;
	}

	mlx4_mrs_dir_entry = proc_mkdir(MLX4_IB_MRS_PROC_DIR_NAME,
					mlx4_ib_driver_dir_entry);
	if (!mlx4_mrs_dir_entry) {
		pr_err("mlx4_ib_proc_init has failed for %s\n",
		       MLX4_IB_MRS_PROC_DIR_NAME);
		err = -ENODEV;
		goto remove_entry;
	}

	return 0;

remove_entry:
	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME,
			  NULL);
error:
	return err;
}
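/*
 * Parse dev_assign_str into the dr[] table.  For example (a purely
 * illustrative value), dev_assign_str="04:00.0-0,04:00.1-1" pins the
 * PCI functions at 04:00.0 and 04:00.1 to mlx4_ib device numbers 0
 * and 1; all fields are hexadecimal.
 */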
static void init_dev_assign(void)
{
	int bus, slot, fn, ib_idx;
	char *p = dev_assign_str, *t;
	char curr_val[32] = {0};
	int ret;
	int j, i = 0;

	memset(dr, 0, sizeof dr);

	if (dev_assign_str[0] == 0)
		return;

	while (strlen(p)) {
		ret = sscanf(p, "%02x:%02x.%x-%x", &bus, &slot, &fn, &ib_idx);
		if (ret != 4 || ib_idx < 0)
			goto err;

		for (j = 0; j < i; j++)
			if (dr[j].nr == ib_idx)
				goto err;

		dr[i].bus = bus;
		dr[i].dev = slot;
		dr[i].func = fn;
		dr[i].nr = ib_idx;

		t = strchr(p, ',');
		sprintf(curr_val, "%02x:%02x.%x-%x", bus, slot, fn, ib_idx);
		if ((!t) && strlen(p) == strlen(curr_val))
			return;

		if (!t || (t + 1) >= dev_assign_str + sizeof dev_assign_str)
			goto err;

		++i;
		if (i >= MAX_DR)
			goto err;

		p = t + 1;
	}

	return;
err:
	memset(dr, 0, sizeof dr);
	printk(KERN_WARNING "mlx4_ib: The value of 'dev_assign_str' parameter "
			    "is incorrect. The parameter value is discarded!");
}
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;

	printk(KERN_INFO "%s", mlx4_ib_version);

	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->priv_uar.map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT,
				      PAGE_SIZE);
	if (!ibdev->priv_uar.map)
		goto err_uar;

	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;
	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner = THIS_MODULE;
	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
	ibdev->num_ports = num_ports;
	ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device = &dev->pdev->dev;

	if (dev->caps.userspace_caps)
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	else
		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

	ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_FLOW) |
		(1ull << IB_USER_VERBS_CMD_DETACH_FLOW) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
	ibdev->ib_dev.query_device = mlx4_ib_query_device;
	ibdev->ib_dev.query_port = mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap = mlx4_ib_mmap;
#ifdef __linux__
	ibdev->ib_dev.get_unmapped_area = mlx4_ib_get_unmapped_area;
#endif
	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send = mlx4_ib_post_send;
	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
	ibdev->ib_dev.attach_flow = mlx4_ib_flow_attach;
	ibdev->ib_dev.detach_flow = mlx4_ib_flow_detach;
	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
	if (!mlx4_is_slave(ibdev->dev)) {
		ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
		ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
		ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
		ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;
	for (i = 0; i < ibdev->num_ports; ++i) {
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
		    IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, i + 1, &ibdev->counters[i]);
			if (err)
				ibdev->counters[i] = -1;
		} else
			ibdev->counters[i] = -1;
	}

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    !mlx4_is_slave(dev)) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN, &ibdev->steer_qpn_base, 0);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
				sizeof(long),
				GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap) {
			dev_err(&dev->pdev->dev, "bit map alloc failed\n");
			goto err_steer_qp_release;
		}

		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);

		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(dev, ibdev->steer_qpn_base,
				ibdev->steer_qpn_base + ibdev->steer_qpn_count - 1);
		if (err)
			goto err_steer_free_bitmap;
	}

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_steer_free_bitmap;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err)
			goto err_sriov;
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}
	if (sysfs_create_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group))
		goto err_notif;

	ibdev->ib_active = true;

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;
err_notif:
	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
		pr_warn("failure unregistering notifier\n");
	flush_workqueue(wq);

err_sriov:
	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
err_counter:
	for (; i; --i)
		if (ibdev->counters[i - 1] != -1)
			mlx4_counter_free(ibdev->dev, i, ibdev->counters[i - 1]);

err_map:
	iounmap(ibdev->priv_uar.map);
	mlx4_ib_free_eqs(dev, ibdev);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
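/*
 * QPNs for userspace-controlled flow steering come out of the range
 * reserved in mlx4_ib_add(); a bitmap tracks which ones are in use.
 */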
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}

void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	BUG_ON(qpn < dev->steer_qpn_base);

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base, get_count_order(count));
}
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	struct ib_flow_spec spec = {
		.type = IB_FLOW_IB_UC,
		.l2_id.ib_uc.qpn = mqp->ibqp.qp_num,
	};

	return is_attach ?
		__mlx4_ib_flow_attach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0)
		: __mlx4_ib_flow_detach(mdev, mqp, &spec, MLX4_DOMAIN_NIC, 0);
}
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p, j;

	mlx4_ib_close_sriov(ibdev);
	sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);
	mlx4_ib_mad_cleanup(ibdev);

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		device_remove_file(&ibdev->ib_dev.dev, mlx4_class_attributes[j]);
	}

	ib_unregister_device(&ibdev->ib_dev);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
		kfree(ibdev->ib_uc_qpns_bitmap);
	}

	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->priv_uar.map);
	for (p = 0; p < ibdev->num_ports; ++p)
		if (ibdev->counters[p] != -1)
			mlx4_counter_free(ibdev->dev, p + 1, ibdev->counters[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
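/*
 * Queue per-port work items that create (do_init != 0) or tear down
 * the tunnel QPs used to paravirtualize a slave's special QP traffic;
 * only the master function does this.
 */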
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;

	if (!mlx4_is_master(dev))
		return;

	dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		goto out;
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			for (i = 0; i < dev->caps.num_ports; i++) {
				if (dm[i])
					kfree(dm[i]);
			}
			goto out;
		}
	}
	/* initialize or tear down tunnel QPs for the slave */
	for (i = 0; i < dev->caps.num_ports; i++) {
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
		if (!ibdev->sriov.is_going_down)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	}
out:
	kfree(dm);
	return;
}
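/*
 * mlx4_core event dispatcher: translates core device events into IB
 * events.  Port-management-change EQEs are handed to a work item on
 * the master (which owns GEN_EQE) and handled inline otherwise.
 */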
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (mlx4_is_master(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
		    IB_LINK_LAYER_INFINIBAND) {
			mlx4_ib_invalidate_all_guid_record(ibdev, p);
		}
		mlx4_ib_info((struct ib_device *) ibdev_ptr,
			     "Port %d logical link is up\n", p);
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		mlx4_ib_info((struct ib_device *) ibdev_ptr,
			     "Port %d logical link is down\n", p);
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = (u8) p;

	ib_dispatch_event(&ibev);
}
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6
};
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_proc_init();
	if (err)
		goto clean_wq;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_proc;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_proc:
	remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
			  mlx4_ib_driver_dir_entry);
	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);

clean_wq:
	destroy_workqueue(wq);
	return err;
}
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);

	/* Remove proc entries */
	remove_proc_entry(MLX4_IB_MRS_PROC_DIR_NAME,
			  mlx4_ib_driver_dir_entry);
	remove_proc_entry(MLX4_IB_DRIVER_PROC_DIR_NAME, NULL);
}
module_init_order(mlx4_ib_init, SI_ORDER_MIDDLE);
module_exit(mlx4_ib_cleanup);

#undef MODULE_VERSION
#include <sys/module.h>
static int
mlx4ib_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t mlx4ib_mod = {
	.name = "mlx4ib",
	.evhand = mlx4ib_evhand,
};

DECLARE_MODULE(mlx4ib, mlx4ib_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY);
MODULE_DEPEND(mlx4ib, mlx4, 1, 1, 1);
MODULE_DEPEND(mlx4ib, ibcore, 1, 1, 1);