/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"1.0-ofed1.5.2"
#define DRV_RELDATE	"August 4, 2010"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
#ifdef CONFIG_MLX4_DEBUG

int mlx4_ib_debug_level = 0;
module_param_named(debug_level, mlx4_ib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
static void *get_ibdev(struct mlx4_dev *dev, void *ctx, u8 port)
{
	struct mlx4_ib_dev *mlxibdev = ctx;

	return &mlxibdev->ib_dev;
}
struct update_gid_work {
	struct work_struct	work;
	union ib_gid		gids[128];
	struct mlx4_ib_dev     *dev;
	int			port;
};

static struct workqueue_struct *wq;
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static union ib_gid zgid;
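
/*
 * Most device attributes below come straight from the capability limits
 * that mlx4_core cached at probe time; the vendor and system-image
 * identifiers are read from firmware via a NodeInfo subnet management
 * packet (MAD).
 */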
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_RAW_ETY)
		props->max_raw_ethy_qp = dev->ib_dev.phys_port_cnt;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq		   = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = IB_ATOMIC_HCA;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
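
/*
 * Decode a PortInfo MAD into ib_port_attr: each field below sits at a
 * fixed byte offset within the attribute data returned by the subnet
 * management agent.
 */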
static void ib_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props,
			       struct ib_smp *out_mad)
{
	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;
	props->link_layer	= IB_LINK_LAYER_INFINIBAND;
}
/*
 * Translate an Ethernet link width/speed to the nearest IB value.  These
 * mappings are an assumed reconstruction; the Ethernet port query below
 * hard-codes 4X width and SDR speed and does not use them.
 */
static int eth_to_ib_width(int w)
{
	switch (w) {
	case 4:
		return IB_WIDTH_4X;
	case 8:
		return IB_WIDTH_8X;
	case 12:
		return IB_WIDTH_12X;
	default:
		return IB_WIDTH_1X;
	}
}

static int eth_to_ib_speed(int s)
{
	switch (s) {
	case 256:
		return 1;	/* SDR */
	case 512:
		return 2;	/* DDR */
	case 1024:
		return 4;	/* QDR */
	default:
		return 1;
	}
}
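
/* IB PortPhysicalState encoding: 5 is LinkUp, 3 is Disabled. */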
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}
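
/*
 * For Ethernet (RoCE) ports there is no subnet manager, so most port
 * attributes are synthesized: fixed width/speed, a single pkey, and a
 * state derived from the carrier status of the underlying net device.
 */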
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props,
			       struct ib_smp *out_mad)
{
	struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;

	props->active_width	= IB_WIDTH_4X;
	props->active_speed	= 1;
	props->port_cap_flags	= IB_PORT_CM_SUP;
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->max_mtu		= IB_MTU_2048;
	props->subnet_timeout	= 0;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= 0;
	props->link_layer	= IB_LINK_LAYER_ETHERNET;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out;

#ifdef __linux__
	tmp = iboe_get_mtu(ndev->mtu);
#else
	tmp = iboe_get_mtu(ndev->if_mtu);
#endif

	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
	props->state	  = netif_carrier_ok(ndev) && netif_oper_up(ndev) ?
				IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);

out:
	spin_unlock(&iboe->lock);
	return 0;
}
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	if (mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		ib_link_query_port(ibdev, port, props, out_mad);
	else
		eth_link_query_port(ibdev, port, props, out_mad);

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
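
/*
 * An IB GID is assembled from two SMA queries: bytes 0-7 (the subnet
 * prefix) come from PortInfo, bytes 8-15 (the port GUID) from the
 * GuidInfo record covering the requested index.
 */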
static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			       union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	spin_lock(&to_mdev(ibdev)->sm_lock);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock(&to_mdev(ibdev)->sm_lock);

	/*
	 * If possible, pass the node descriptor to firmware so that it can
	 * generate a trap 144 notice.  If the command fails, just ignore it.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);
	err = mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		       MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
	if (err)
		mlx4_ib_dbg("SET_NODE command failed (%d)", err);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
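
/*
 * SET_PORT carries its arguments in a command mailbox whose layout
 * depends on the firmware command set: old-style firmware packs the
 * qkey-violation reset bit and the capability mask at different offsets
 * than the new-style command, hence the two branches below.
 */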
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	resp.qp_tab_size = dev->dev->caps.num_qps;

	if (mlx4_wc_enabled()) {
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.bf_reg_size      = 0;
		resp.bf_regs_per_page = 0;
	}

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	err = ib_copy_to_udata(udata, &resp, sizeof resp);
	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
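
/*
 * Userspace maps two pages through this entry point: page offset 0 is the
 * uncached UAR (doorbell) page, and page offset 1 is the write-combining
 * Blue Flame page located num_uars pages after the UAR in the BAR.
 */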
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_wc(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kzalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}
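
/*
 * RoCE multicast: attaching a QP to a multicast GID also requires
 * programming the derived multicast MAC into the Ethernet net device.
 * Each gid_entry records what was added so that detach can undo it.
 */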
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	u8 mac[6];
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		rtnl_lock();
		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac, 6, 0);
		ret = 1;
		rtnl_unlock();
		dev_put(ndev);
	}

	return ret;
}
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
				    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    (ibqp->qp_type == IB_QPT_RAW_ETH) ?
				    MLX4_MCAST_PROT_EN : MLX4_MCAST_PROT_IB);
	if (err)
		return err;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      (ibqp->qp_type == IB_QPT_RAW_ETH) ?
			      MLX4_MCAST_PROT_EN : MLX4_MCAST_PROT_IB);
	return err;
}
static struct gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct gid_entry *ge;
	struct gid_entry *tmp;
	struct gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u8 mac[6];
	struct net_device *ndev;
	struct gid_entry *ge;

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    (ibqp->qp_type == IB_QPT_RAW_ETH) ?
				    MLX4_MCAST_PROT_EN : MLX4_MCAST_PROT_IB);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		if (ndev) {
			rtnl_lock();
			dev_mc_delete(mdev->iboe.netdevs[ge->port - 1], mac, 6, 0);
			rtnl_unlock();
			dev_put(ndev);
		}
		list_del(&ge->list);
		kfree(ge);
	} else
		printk(KERN_WARNING "could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}
static void mlx4_dummy_comp_handler(struct ib_cq *cq, void *cq_context)
{
}
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct ib_pd *pd;
	struct ib_cq *cq;
	int err;

	if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(mdev->dev, &xrcd->xrcdn);
	if (err)
		goto err_xrcd;

	pd = mlx4_ib_alloc_pd(ibdev, NULL, NULL);
	if (IS_ERR(pd)) {
		err = PTR_ERR(pd);
		goto err_pd;
	}
	pd->device = ibdev;

	cq = mlx4_ib_create_cq(ibdev, 1, 0, NULL, NULL);
	if (IS_ERR(cq)) {
		err = PTR_ERR(cq);
		goto err_cq;
	}
	cq->device = ibdev;
	cq->comp_handler = mlx4_dummy_comp_handler;

	if (context)
		if (ib_copy_to_udata(udata, &xrcd->xrcdn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_copy;
		}

	xrcd->cq = cq;
	xrcd->pd = pd;
	return &xrcd->ibxrcd;

err_copy:
	mlx4_ib_destroy_cq(cq);
err_cq:
	mlx4_ib_dealloc_pd(pd);
err_pd:
	mlx4_xrcd_free(mdev->dev, xrcd->xrcdn);
err_xrcd:
	kfree(xrcd);
	return ERR_PTR(err);
}
static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx4_ib_xrcd *mxrcd = to_mxrcd(xrcd);

	mlx4_ib_destroy_cq(mxrcd->cq);
	mlx4_ib_dealloc_pd(mxrcd->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(mxrcd);

	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);

	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);

	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);

	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
/*
 * Create a show function and a device_attribute struct pointing to
 * that function for _name.
 */
#define DEVICE_DIAG_RPRT_ATTR(_name, _offset, _op_mod)		\
static ssize_t show_rprt_##_name(struct device *dev,		\
				 struct device_attribute *attr,	\
				 char *buf)			\
{								\
	return show_diag_rprt(dev, buf, _offset, _op_mod);	\
}								\
static DEVICE_ATTR(_name, S_IRUGO, show_rprt_##_name, NULL);
#define MLX4_DIAG_RPRT_CLEAR_DIAGS 3

static size_t show_diag_rprt(struct device *device, char *buf,
			     u32 offset, u8 op_modifier)
{
	size_t ret;
	u32 counter_offset = offset;
	u32 diag_counter = 0;
	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
					       ib_dev.dev);

	ret = mlx4_query_diag_counters(dev->dev, 1, op_modifier,
				       &counter_offset, &diag_counter);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", diag_counter);
}
static ssize_t clear_diag_counters(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t length)
{
	size_t ret;
	struct mlx4_ib_dev *dev = container_of(device, struct mlx4_ib_dev,
					       ib_dev.dev);

	ret = mlx4_query_diag_counters(dev->dev, 0, MLX4_DIAG_RPRT_CLEAR_DIAGS,
				       NULL, NULL);
	if (ret)
		return ret;

	return length;
}
DEVICE_DIAG_RPRT_ATTR(rq_num_lle	, 0x00, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_lle	, 0x04, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_lqpoe	, 0x08, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_lqpoe	, 0x0C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_leeoe	, 0x10, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_leeoe	, 0x14, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_lpe	, 0x18, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_lpe	, 0x1C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_wrfe	, 0x20, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_wrfe	, 0x24, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_mwbe	, 0x2C, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_bre	, 0x34, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_lae	, 0x38, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rire	, 0x44, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rire	, 0x48, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rae	, 0x4C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rae	, 0x50, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_roe	, 0x54, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_tree	, 0x5C, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rree	, 0x64, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rnr	, 0x68, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rnr	, 0x6C, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rabrte	, 0x7C, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_ieecne	, 0x84, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_ieecse	, 0x8C, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_oos	, 0x100, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_oos	, 0x104, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_mce	, 0x108, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_rsync	, 0x110, 2);
DEVICE_DIAG_RPRT_ATTR(sq_num_rsync	, 0x114, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_udsdprd	, 0x118, 2);
DEVICE_DIAG_RPRT_ATTR(rq_num_ucsdprd	, 0x120, 2);
DEVICE_DIAG_RPRT_ATTR(num_cqovf		, 0x1A0, 2);
DEVICE_DIAG_RPRT_ATTR(num_eqovf		, 0x1A4, 2);
DEVICE_DIAG_RPRT_ATTR(num_baddb		, 0x1A8, 2);

static DEVICE_ATTR(clear_diag, S_IWUGO, NULL, clear_diag_counters);
static struct attribute *diag_rprt_attrs[] = {
	&dev_attr_rq_num_lle.attr,
	&dev_attr_sq_num_lle.attr,
	&dev_attr_rq_num_lqpoe.attr,
	&dev_attr_sq_num_lqpoe.attr,
	&dev_attr_rq_num_leeoe.attr,
	&dev_attr_sq_num_leeoe.attr,
	&dev_attr_rq_num_lpe.attr,
	&dev_attr_sq_num_lpe.attr,
	&dev_attr_rq_num_wrfe.attr,
	&dev_attr_sq_num_wrfe.attr,
	&dev_attr_sq_num_mwbe.attr,
	&dev_attr_sq_num_bre.attr,
	&dev_attr_rq_num_lae.attr,
	&dev_attr_sq_num_rire.attr,
	&dev_attr_rq_num_rire.attr,
	&dev_attr_sq_num_rae.attr,
	&dev_attr_rq_num_rae.attr,
	&dev_attr_sq_num_roe.attr,
	&dev_attr_sq_num_tree.attr,
	&dev_attr_sq_num_rree.attr,
	&dev_attr_rq_num_rnr.attr,
	&dev_attr_sq_num_rnr.attr,
	&dev_attr_sq_num_rabrte.attr,
	&dev_attr_sq_num_ieecne.attr,
	&dev_attr_sq_num_ieecse.attr,
	&dev_attr_rq_num_oos.attr,
	&dev_attr_sq_num_oos.attr,
	&dev_attr_rq_num_mce.attr,
	&dev_attr_rq_num_rsync.attr,
	&dev_attr_sq_num_rsync.attr,
	&dev_attr_rq_num_udsdprd.attr,
	&dev_attr_rq_num_ucsdprd.attr,
	&dev_attr_num_cqovf.attr,
	&dev_attr_num_eqovf.attr,
	&dev_attr_num_baddb.attr,
	&dev_attr_clear_diag.attr,
	NULL
};

struct attribute_group diag_counters_group = {
	.name  = "diag_counters",
	.attrs = diag_rprt_attrs
};
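
/*
 * Build the 8-byte interface ID of a link-local GID from a port's 6-byte
 * Ethernet MAC: the MAC is split around two middle bytes carrying the
 * VLAN ID (ff:fe when there is no VLAN), and the universal/local bit is
 * flipped, as in standard EUI-64 derivation.
 */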
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
{
#ifdef __linux__
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
#else
	memcpy(eui, IF_LLADDR(dev), 3);
	memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);
#endif
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev *dev = gw->dev->dev;
	struct ib_event event;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
		kfree(gw);
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
	if (err)
		printk(KERN_WARNING "set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
		event.device = &gw->dev->ib_dev;
		event.element.port_num = gw->port;
		event.event = IB_EVENT_GID_CHANGE;
		ib_dispatch_event(&event);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}
enum {
	MLX4_MAX_EFF_VLANS = 128 - MLX4_VLAN_REGULAR,
};
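
/*
 * Rebuild the RoCE GID table for a port from the link-local GIDs of the
 * port's net device and its VLAN children, then hand the new table to
 * firmware from a workqueue (SET_PORT cannot be issued in atomic context).
 */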
static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
	struct net_device *ndev = dev->iboe.netdevs[port - 1];
	struct update_gid_work *work;
	struct net_device *tmp;
	int i;
	u8 *hits;
	int ret;
	union ib_gid gid;
	int tofree;
	int found;
	int need_update = 0;
	u16 vid;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	hits = kzalloc(MLX4_MAX_EFF_VLANS + 1, GFP_ATOMIC);
	if (!hits) {
		ret = -ENOMEM;
		goto out;
	}

#ifdef __linux__
	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, tmp) {
#else
	IFNET_RLOCK();
	TAILQ_FOREACH(tmp, &V_ifnet, if_link) {
#endif
		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
			vid = rdma_vlan_dev_vlan_id(tmp);
			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
			found = 0;
			tofree = -1;
			for (i = 0; i < MLX4_MAX_EFF_VLANS + 1; ++i) {
				if (tofree < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
					tofree = i;
				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
					hits[i] = 1;
					found = 1;
					break;
				}
			}

			if (!found) {
				if (tmp == ndev &&
				    (memcmp(&dev->iboe.gid_table[port - 1][0], &gid, sizeof gid) ||
				     !memcmp(&dev->iboe.gid_table[port - 1][0], &zgid, sizeof gid))) {
					dev->iboe.gid_table[port - 1][0] = gid;
					++need_update;
					hits[0] = 1;
				} else if (tofree >= 0) {
					dev->iboe.gid_table[port - 1][tofree] = gid;
					hits[tofree] = 1;
					++need_update;
				}
			}
		}
	}
#ifdef __linux__
	read_unlock(&dev_base_lock);
#else
	IFNET_RUNLOCK();
#endif

	for (i = 0; i < MLX4_MAX_EFF_VLANS + 1; ++i)
		if (!hits[i]) {
			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
				++need_update;
			dev->iboe.gid_table[port - 1][i] = zgid;
		}

	if (need_update) {
		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
		INIT_WORK(&work->work, update_gids_task);
		work->port = port;
		work->dev = dev;
		queue_work(wq, &work->work);
	} else
		kfree(work);

	kfree(hits);
	return 0;

out:
	kfree(work);
	return ret;
}
static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		update_ipv6_gids(dev, port, 0);
		break;

	case NETDEV_DOWN:
		update_ipv6_gids(dev, port, 1);
		dev->iboe.netdevs[port - 1] = NULL;
	}
}

static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 0);
}

static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 1);
}
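
/*
 * Netdevice notifier: resynchronize the cached per-port net devices with
 * whatever mlx4_en currently exposes, then fold UP/DOWN/CHANGEADDR events
 * into GID table updates for the affected port.
 */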
static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct net_device *dev = ptr;
	struct mlx4_ib_dev *ibdev;
	struct net_device *oldnd;
	struct mlx4_ib_iboe *iboe;
	int port;

#ifdef __linux__
	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;
#endif

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		oldnd = iboe->netdevs[port - 1];
		iboe->netdevs[port - 1] = mlx4_get_prot_dev(ibdev->dev, MLX4_PROT_EN, port);
		if (oldnd != iboe->netdevs[port - 1]) {
			if (iboe->netdevs[port - 1])
				netdev_added(ibdev, port);
			else
				netdev_removed(ibdev, port);
		}
	}

	if (dev == iboe->netdevs[0] ||
	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
		handle_en_event(ibdev, 1, event);
	else if (dev == iboe->netdevs[1] ||
		 (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
		handle_en_event(ibdev, 2, event);

	spin_unlock(&iboe->lock);

	return NOTIFY_DONE;
}
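
/*
 * Main probe path, called by mlx4_core once per HCA: allocate the IB
 * device, fill in the verbs entry points, register with the RDMA core,
 * and wire up sysfs plus the netdevice notifier used for RoCE.
 */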
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	static int mlx4_ib_version_printed;
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, k;
	int err;
	struct mlx4_ib_iboe *iboe;

	if (!mlx4_ib_version_printed) {
		printk(KERN_INFO "%s", mlx4_ib_version);
		++mlx4_ib_version_printed;
	}

	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->priv_uar.map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!ibdev->priv_uar.map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt	= ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device	= &dev->pdev->dev;

	ibdev->ib_dev.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr	= mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;

	ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.create_xrc_srq	 = mlx4_ib_create_xrc_srq;
		ibdev->ib_dev.alloc_xrcd	 = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd	 = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.create_xrc_rcv_qp	 = mlx4_ib_create_xrc_rcv_qp;
		ibdev->ib_dev.modify_xrc_rcv_qp	 = mlx4_ib_modify_xrc_rcv_qp;
		ibdev->ib_dev.query_xrc_rcv_qp	 = mlx4_ib_query_xrc_rcv_qp;
		ibdev->ib_dev.reg_xrc_rcv_qp	 = mlx4_ib_reg_xrc_rcv_qp;
		ibdev->ib_dev.unreg_xrc_rcv_qp	 = mlx4_ib_unreg_xrc_rcv_qp;
		ibdev->ib_dev.uverbs_cmd_mask	|=
			(1ull << IB_USER_VERBS_CMD_CREATE_XRC_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_OPEN_XRC_DOMAIN)	|
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRC_DOMAIN)	|
			(1ull << IB_USER_VERBS_CMD_CREATE_XRC_RCV_QP)	|
			(1ull << IB_USER_VERBS_CMD_MODIFY_XRC_RCV_QP)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_XRC_RCV_QP)	|
			(1ull << IB_USER_VERBS_CMD_REG_XRC_RCV_QP)	|
			(1ull << IB_USER_VERBS_CMD_UNREG_XRC_RCV_QP);
	}

	spin_lock_init(&iboe->lock);
	if (init_node_data(ibdev))
		goto err_map;

	for (k = 0; k < ibdev->num_ports; ++k) {
		err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[k]);
		if (err)
			ibdev->counters[k] = -1;
		else
			mlx4_set_iboe_counter(dev, ibdev->counters[k], k + 1);
	}

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	mutex_init(&ibdev->xrc_reg_mutex);

	if (ib_register_device(&ibdev->ib_dev))
		goto err_counter;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err)
			goto err_reg;
	}

	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[i]))
			goto err_notif;
	}

	if (sysfs_create_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group))
		goto err_notif;

	ibdev->ib_active = 1;

	return ibdev;

err_notif:
	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
		printk(KERN_WARNING "failure unregistering notifier\n");
	flush_workqueue(wq);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_counter:
	for (; k; --k)
		mlx4_counter_free(ibdev->dev, ibdev->counters[k - 1]);

err_map:
	iounmap(ibdev->priv_uar.map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p, k;

	sysfs_remove_group(&ibdev->ib_dev.dev.kobj, &diag_counters_group);

	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	for (k = 0; k < ibdev->num_ports; ++k)
		mlx4_counter_free(ibdev->dev, ibdev->counters[k]);

	if (ibdev->iboe.nb.notifier_call) {
		unregister_netdevice_notifier(&ibdev->iboe.nb);
		flush_workqueue(wq);
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->priv_uar.map);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, int port)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);

	if (port > ibdev->num_ports)
		return;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = 0;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = port;

	ib_dispatch_event(&ibev);
}
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.get_prot_dev	= get_ibdev,
	.protocol	= MLX4_PROT_IB,
};
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err) {
		destroy_workqueue(wq);
		return err;
	}

	return 0;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	destroy_workqueue(wq);
}
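
/*
 * FreeBSD module plumbing: the Linux-style module_init/module_exit calls
 * above are mapped onto a moduledata_t so the driver can be loaded with
 * kldload and ordered after mlx4_core via MODULE_DEPEND.
 */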
module_init_order(mlx4_ib_init, SI_ORDER_MIDDLE);
module_exit(mlx4_ib_cleanup);

#undef MODULE_VERSION
#include <sys/module.h>
static int
mlx4ib_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t mlx4ib_mod = {
	.name = "mlx4ib",
	.evhand = mlx4ib_evhand,
};

DECLARE_MODULE(mlx4ib, mlx4ib_mod, SI_SUB_SMP, SI_ORDER_ANY);
MODULE_DEPEND(mlx4ib, mlx4, 1, 1, 1);