1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/errno.h>
39 #include <linux/pci.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/io-mapping.h>
43 #include <linux/mlx4/device.h>
44 #include <linux/mlx4/doorbell.h>
50 MODULE_AUTHOR("Roland Dreier");
51 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
52 MODULE_LICENSE("Dual BSD/GPL");
53 MODULE_VERSION(DRV_VERSION);
55 struct workqueue_struct *mlx4_wq;
57 #ifdef CONFIG_MLX4_DEBUG
59 int mlx4_debug_level = 0;
60 module_param_named(debug_level, mlx4_debug_level, int, 0644);
61 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
63 #endif /* CONFIG_MLX4_DEBUG */
66 module_param_named(block_loopback, mlx4_blck_lb, int, 0644);
67 MODULE_PARM_DESC(block_loopback, "Block multicast loopback packets if > 0");
72 module_param(msi_x, int, 0444);
73 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
75 #else /* CONFIG_PCI_MSI */
79 #endif /* CONFIG_PCI_MSI */
81 static char mlx4_version[] __devinitdata =
82 DRV_NAME ": Mellanox ConnectX core driver v"
83 DRV_VERSION " (" DRV_RELDATE ")\n";
85 struct mutex drv_mutex;
87 static struct mlx4_profile default_profile = {
90 .rdmarc_per_qp = 1 << 4,
97 static int log_num_mac = 2;
98 module_param_named(log_num_mac, log_num_mac, int, 0444);
99 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
102 module_param_named(use_prio, use_prio, bool, 0444);
103 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
106 static struct mlx4_profile mod_param_profile = { 0 };
108 module_param_named(log_num_qp, mod_param_profile.num_qp, int, 0444);
109 MODULE_PARM_DESC(log_num_qp, "log maximum number of QPs per HCA");
111 module_param_named(log_num_srq, mod_param_profile.num_srq, int, 0444);
112 MODULE_PARM_DESC(log_num_srq, "log maximum number of SRQs per HCA");
114 module_param_named(log_rdmarc_per_qp, mod_param_profile.rdmarc_per_qp, int, 0444);
115 MODULE_PARM_DESC(log_rdmarc_per_qp, "log number of RDMARC buffers per QP");
117 module_param_named(log_num_cq, mod_param_profile.num_cq, int, 0444);
118 MODULE_PARM_DESC(log_num_cq, "log maximum number of CQs per HCA");
120 module_param_named(log_num_mcg, mod_param_profile.num_mcg, int, 0444);
121 MODULE_PARM_DESC(log_num_mcg, "log maximum number of multicast groups per HCA");
123 module_param_named(log_num_mpt, mod_param_profile.num_mpt, int, 0444);
124 MODULE_PARM_DESC(log_num_mpt,
125 "log maximum number of memory protection table entries per HCA");
127 module_param_named(log_num_mtt, mod_param_profile.num_mtt, int, 0444);
128 MODULE_PARM_DESC(log_num_mtt,
129 "log maximum number of memory translation table segments per HCA");
131 static int log_mtts_per_seg = 0;
132 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
133 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
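/*
 * Fold the log2-valued module parameters into the profile: a parameter
 * left at 0 keeps the built-in default, while a non-zero value N
 * replaces the corresponding default with 1 << N.
 */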
135 static void process_mod_param_profile(void)
137 default_profile.num_qp = (mod_param_profile.num_qp ?
138 1 << mod_param_profile.num_qp :
139 default_profile.num_qp);
140 default_profile.num_srq = (mod_param_profile.num_srq ?
141 1 << mod_param_profile.num_srq :
142 default_profile.num_srq);
143 default_profile.rdmarc_per_qp = (mod_param_profile.rdmarc_per_qp ?
144 1 << mod_param_profile.rdmarc_per_qp :
145 default_profile.rdmarc_per_qp);
146 default_profile.num_cq = (mod_param_profile.num_cq ?
147 1 << mod_param_profile.num_cq :
148 default_profile.num_cq);
149 default_profile.num_mcg = (mod_param_profile.num_mcg ?
150 1 << mod_param_profile.num_mcg :
151 default_profile.num_mcg);
152 default_profile.num_mpt = (mod_param_profile.num_mpt ?
153 1 << mod_param_profile.num_mpt :
154 default_profile.num_mpt);
155 default_profile.num_mtt = (mod_param_profile.num_mtt ?
156 1 << mod_param_profile.num_mtt :
157 default_profile.num_mtt);
160 struct mlx4_port_config
162 struct list_head list;
163 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
164 struct pci_dev *pdev;
166 static LIST_HEAD(config_list);
168 static void mlx4_config_cleanup(void)
170 struct mlx4_port_config *config, *tmp;
172 list_for_each_entry_safe(config, tmp, &config_list, list) {
173 list_del(&config->list);
178 void *mlx4_get_prot_dev(struct mlx4_dev *dev, enum mlx4_prot proto, int port)
180 return mlx4_find_get_prot_dev(dev, proto, port);
182 EXPORT_SYMBOL(mlx4_get_prot_dev);
184 void mlx4_set_iboe_counter(struct mlx4_dev *dev, int index, u8 port)
186 struct mlx4_priv *priv = mlx4_priv(dev);
188 priv->iboe_counter_index[port - 1] = index;
190 EXPORT_SYMBOL(mlx4_set_iboe_counter);
192 int mlx4_get_iboe_counter(struct mlx4_dev *dev, u8 port)
194 struct mlx4_priv *priv = mlx4_priv(dev);
196 return priv->iboe_counter_index[port - 1];
198 EXPORT_SYMBOL(mlx4_get_iboe_counter);
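/*
 * Validate a requested port-type configuration: mixing IB and ETH ports
 * requires the DPDP capability, and each requested type must fall within
 * the port's supported_type mask reported by the firmware.
 */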
200 int mlx4_check_port_params(struct mlx4_dev *dev,
201 enum mlx4_port_type *port_type)
205 for (i = 0; i < dev->caps.num_ports - 1; i++) {
206 if (port_type[i] != port_type[i + 1]) {
207 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
208 mlx4_err(dev, "Only same port types supported "
209 "on this HCA, aborting.\n");
212 if (port_type[i] == MLX4_PORT_TYPE_ETH &&
213 port_type[i + 1] == MLX4_PORT_TYPE_IB)
218 for (i = 0; i < dev->caps.num_ports; i++) {
219 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
220 mlx4_err(dev, "Requested port type for port %d is not "
221 "supported on this HCA\n", i + 1);
228 static void mlx4_set_port_mask(struct mlx4_dev *dev)
232 for (i = 1; i <= dev->caps.num_ports; ++i)
233 dev->caps.port_mask[i] = dev->caps.port_type[i];
236 static u8 get_counters_mode(u64 flags)
238 switch (flags >> 48 & 3) {
239 case 2:
240 case 3:
241 return MLX4_CUNTERS_EXT;
242 case 1:
243 return MLX4_CUNTERS_BASIC;
244 default:
245 return MLX4_CUNTERS_DISABLED;
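/*
 * Query device capabilities from firmware (QUERY_DEV_CAP), sanity-check
 * them against kernel and driver limits, and translate them into the
 * dev->caps fields used by the rest of the driver, honouring the
 * log_num_mac, use_prio and log_mtts_per_seg module parameters.
 */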
249 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
254 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
256 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
260 if (dev_cap->min_page_sz > PAGE_SIZE) {
261 mlx4_err(dev, "HCA minimum page size of %d bigger than "
262 "kernel PAGE_SIZE of %d, aborting.\n",
263 dev_cap->min_page_sz, PAGE_SIZE);
266 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
267 mlx4_err(dev, "HCA has %d ports, but we only support %d, "
269 dev_cap->num_ports, MLX4_MAX_PORTS);
273 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
274 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
275 "PCI resource 2 size of 0x%llx, aborting.\n",
277 (unsigned long long) pci_resource_len(dev->pdev, 2));
281 dev->caps.num_ports = dev_cap->num_ports;
282 for (i = 1; i <= dev->caps.num_ports; ++i) {
283 dev->caps.vl_cap[i] = dev_cap->max_vl[i];
284 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
285 dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
286 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
287 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
288 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
289 dev->caps.def_mac[i] = dev_cap->def_mac[i];
290 dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
291 dev->caps.trans_type[i] = dev_cap->trans_type[i];
292 dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
293 dev->caps.wavelength[i] = dev_cap->wavelength[i];
294 dev->caps.trans_code[i] = dev_cap->trans_code[i];
297 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
298 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
299 dev->caps.bf_reg_size = dev_cap->bf_reg_size;
300 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
301 dev->caps.max_sq_sg = dev_cap->max_sq_sg;
302 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
303 dev->caps.max_wqes = dev_cap->max_qp_sz;
304 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
305 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
306 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
307 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
308 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
309 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
310 dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
312 * Subtract 1 from the limit because we need to allocate a
313 * spare CQE so the HCA HW can tell the difference between an
314 * empty CQ and a full CQ.
316 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
317 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
318 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
319 dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
320 dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
321 dev->caps.mtts_per_seg);
322 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
323 dev->caps.reserved_uars = dev_cap->reserved_uars;
324 dev->caps.reserved_pds = dev_cap->reserved_pds;
325 dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
326 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
327 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
328 dev->caps.flags = dev_cap->flags;
329 dev->caps.bmme_flags = dev_cap->bmme_flags;
330 dev->caps.reserved_lkey = dev_cap->reserved_lkey;
331 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
332 dev->caps.udp_rss = dev_cap->udp_rss;
333 dev->caps.loopback_support = dev_cap->loopback_support;
334 dev->caps.wol = dev_cap->wol;
335 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
336 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
337 dev_cap->reserved_xrcds : 0;
338 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
339 dev_cap->max_xrcds : 0;
341 dev->caps.log_num_macs = log_num_mac;
342 dev->caps.log_num_prios = use_prio ? 3 : 0;
344 for (i = 1; i <= dev->caps.num_ports; ++i) {
345 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
346 if (dev->caps.supported_type[i]) {
347 if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
348 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
350 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
352 dev->caps.possible_type[i] = dev->caps.port_type[i];
353 mlx4_priv(dev)->sense.sense_allowed[i] =
354 dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
356 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
357 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
358 mlx4_warn(dev, "Requested number of MACs is too much "
359 "for port %d, reducing to %d.\n",
360 i, 1 << dev->caps.log_num_macs);
362 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
365 dev->caps.counters_mode = get_counters_mode(dev_cap->flags);
366 dev->caps.max_basic_counters = 1 << ilog2(dev_cap->max_basic_counters);
367 dev->caps.max_ext_counters = 1 << ilog2(dev_cap->max_ext_counters);
369 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
370 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
371 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
372 (1 << dev->caps.log_num_macs) *
373 (1 << dev->caps.log_num_vlans) *
374 (1 << dev->caps.log_num_prios) *
377 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
378 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
379 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR];
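/*
 * Remember the currently possible port types for this PCI device in
 * config_list; mlx4_init_hca() consults this list so that a port-type
 * choice survives a device restart.
 */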
384 static int mlx4_save_config(struct mlx4_dev *dev)
386 struct mlx4_port_config *config;
389 list_for_each_entry(config, &config_list, list) {
390 if (config->pdev == dev->pdev) {
391 for (i = 1; i <= dev->caps.num_ports; i++)
392 config->port_type[i] = dev->caps.possible_type[i];
397 config = kmalloc(sizeof(struct mlx4_port_config), GFP_KERNEL);
401 config->pdev = dev->pdev;
402 for (i = 1; i <= dev->caps.num_ports; i++)
403 config->port_type[i] = dev->caps.possible_type[i];
405 list_add_tail(&config->list, &config_list);
411 * Change the port configuration of the device.
412 * Every user of this function must hold the port mutex.
414 int mlx4_change_port_types(struct mlx4_dev *dev,
415 enum mlx4_port_type *port_types)
421 for (port = 0; port < dev->caps.num_ports; port++) {
422 /* Change the port type only if the new type is different
423 * from the current, and not set to Auto */
424 if (port_types[port] != dev->caps.port_type[port + 1]) {
426 dev->caps.port_type[port + 1] = port_types[port];
430 mlx4_unregister_device(dev);
431 for (port = 1; port <= dev->caps.num_ports; port++) {
432 mlx4_CLOSE_PORT(dev, port);
433 err = mlx4_SET_PORT(dev, port);
435 mlx4_err(dev, "Failed to set port %d, "
440 mlx4_set_port_mask(dev);
441 mlx4_save_config(dev);
442 err = mlx4_register_device(dev);
449 static ssize_t show_port_type(struct device *dev,
450 struct device_attribute *attr,
453 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
455 struct mlx4_dev *mdev = info->dev;
459 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
461 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
462 sprintf(buf, "auto (%s)\n", type);
464 sprintf(buf, "%s\n", type);
469 static ssize_t set_port_type(struct device *dev,
470 struct device_attribute *attr,
471 const char *buf, size_t count)
473 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
475 struct mlx4_dev *mdev = info->dev;
476 struct mlx4_priv *priv = mlx4_priv(mdev);
477 enum mlx4_port_type types[MLX4_MAX_PORTS];
478 enum mlx4_port_type new_types[MLX4_MAX_PORTS];
482 if (!strcmp(buf, "ib\n"))
483 info->tmp_type = MLX4_PORT_TYPE_IB;
484 else if (!strcmp(buf, "eth\n"))
485 info->tmp_type = MLX4_PORT_TYPE_ETH;
486 else if (!strcmp(buf, "auto\n"))
487 info->tmp_type = MLX4_PORT_TYPE_AUTO;
489 mlx4_err(mdev, "%s is not a supported port type\n", buf);
493 mlx4_stop_sense(mdev);
494 mutex_lock(&priv->port_mutex);
495 /* Possible type is always the one that was delivered */
496 mdev->caps.possible_type[info->port] = info->tmp_type;
498 for (i = 0; i < mdev->caps.num_ports; i++) {
499 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
500 mdev->caps.possible_type[i+1];
501 if (types[i] == MLX4_PORT_TYPE_AUTO)
502 types[i] = mdev->caps.port_type[i+1];
506 if (++priv->changed_ports < mdev->caps.num_ports)
509 priv->trig = priv->changed_ports = 0;
512 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
513 for (i = 1; i <= mdev->caps.num_ports; i++) {
514 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
515 mdev->caps.possible_type[i] = mdev->caps.port_type[i];
521 mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
522 "Set only 'eth' or 'ib' for both ports "
523 "(should be the same)\n");
527 mlx4_do_sense_ports(mdev, new_types, types);
529 err = mlx4_check_port_params(mdev, new_types);
533 /* We are about to apply the changes after the configuration
534 * was verified, no need to remember the temporary types
536 for (i = 0; i < mdev->caps.num_ports; i++)
537 priv->port[i + 1].tmp_type = 0;
539 err = mlx4_change_port_types(mdev, new_types);
542 mlx4_start_sense(mdev);
543 mutex_unlock(&priv->port_mutex);
544 return err ? err : count;
547 static ssize_t trigger_port(struct device *dev, struct device_attribute *attr,
548 const char *buf, size_t count)
550 struct pci_dev *pdev = to_pci_dev(dev);
551 struct mlx4_dev *mdev = pci_get_drvdata(pdev);
552 struct mlx4_priv *priv = container_of(mdev, struct mlx4_priv, dev);
557 mutex_lock(&priv->port_mutex);
559 mutex_unlock(&priv->port_mutex);
562 DEVICE_ATTR(port_trigger, S_IWUGO, NULL, trigger_port);
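/*
 * Allocate ICM memory for the firmware image, hand it to the device
 * with MAP_FA, and start the firmware with RUN_FW.
 */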
564 static int mlx4_load_fw(struct mlx4_dev *dev)
566 struct mlx4_priv *priv = mlx4_priv(dev);
569 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
570 GFP_HIGHUSER | __GFP_NOWARN, 0);
571 if (!priv->fw.fw_icm) {
572 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
576 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
578 mlx4_err(dev, "MAP_FA command failed, aborting.\n");
582 err = mlx4_RUN_FW(dev);
584 mlx4_err(dev, "RUN_FW command failed, aborting.\n");
594 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
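/*
 * Map the four cMPT sub-tables (QP, SRQ, CQ and EQ context MPTs); each
 * lives at a fixed offset within the cMPT area, derived from the entry
 * size and MLX4_CMPT_SHIFT.
 */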
598 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
601 struct mlx4_priv *priv = mlx4_priv(dev);
604 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
606 ((u64) (MLX4_CMPT_TYPE_QP *
607 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
608 cmpt_entry_sz, dev->caps.num_qps,
609 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
614 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
616 ((u64) (MLX4_CMPT_TYPE_SRQ *
617 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
618 cmpt_entry_sz, dev->caps.num_srqs,
619 dev->caps.reserved_srqs, 0, 0);
623 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
625 ((u64) (MLX4_CMPT_TYPE_CQ *
626 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
627 cmpt_entry_sz, dev->caps.num_cqs,
628 dev->caps.reserved_cqs, 0, 0);
632 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
634 ((u64) (MLX4_CMPT_TYPE_EQ *
635 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
637 dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
644 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
647 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
650 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
656 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
657 struct mlx4_init_hca_param *init_hca, u64 icm_size)
659 struct mlx4_priv *priv = mlx4_priv(dev);
663 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
665 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
669 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
670 (unsigned long long) icm_size >> 10,
671 (unsigned long long) aux_pages << 2);
673 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
674 GFP_HIGHUSER | __GFP_NOWARN, 0);
675 if (!priv->fw.aux_icm) {
676 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
680 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
682 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
686 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
688 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
692 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
693 init_hca->eqc_base, dev_cap->eqc_entry_sz,
694 dev->caps.num_eqs, dev->caps.num_eqs,
697 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
702 * Reserved MTT entries must be aligned up to a cacheline
703 * boundary, since the FW will write to them, while the driver
704 * writes to all other MTT entries. (The variable
705 * dev->caps.mtt_entry_sz below is really the MTT segment
706 * size, not the raw entry size)
708 dev->caps.reserved_mtts =
709 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
710 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
712 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
714 dev->caps.mtt_entry_sz,
715 dev->caps.num_mtt_segs,
716 dev->caps.reserved_mtts, 1, 0);
718 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
722 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
724 dev_cap->dmpt_entry_sz,
726 dev->caps.reserved_mrws, 1, 1);
728 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
732 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
734 dev_cap->qpc_entry_sz,
736 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
739 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
743 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
745 dev_cap->aux_entry_sz,
747 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
750 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
754 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
756 dev_cap->altc_entry_sz,
758 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
761 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
765 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
766 init_hca->rdmarc_base,
767 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
769 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
772 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
776 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
778 dev_cap->cqc_entry_sz,
780 dev->caps.reserved_cqs, 0, 0);
782 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
783 goto err_unmap_rdmarc;
786 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
788 dev_cap->srq_entry_sz,
790 dev->caps.reserved_srqs, 0, 0);
792 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
797 * It's not strictly required, but for simplicity just map the
798 * whole multicast group table now. The table isn't very big
799 * and it's a lot easier than trying to track ref counts.
801 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
802 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
803 dev->caps.num_mgms + dev->caps.num_amgms,
804 dev->caps.num_mgms + dev->caps.num_amgms,
807 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
814 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
817 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
820 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
823 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
826 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
829 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
832 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
835 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
838 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
841 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
842 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
843 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
844 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
847 mlx4_UNMAP_ICM_AUX(dev);
850 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
855 static void mlx4_free_icms(struct mlx4_dev *dev)
857 struct mlx4_priv *priv = mlx4_priv(dev);
859 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
860 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
861 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
862 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
863 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
864 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
865 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
866 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
867 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
868 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
869 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
870 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
871 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
872 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
874 mlx4_UNMAP_ICM_AUX(dev);
875 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
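/*
 * The BlueFlame registers follow the UAR pages in BAR 2; map the
 * remainder of the BAR with a write-combining mapping for BlueFlame use.
 */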
878 static int map_bf_area(struct mlx4_dev *dev)
880 struct mlx4_priv *priv = mlx4_priv(dev);
881 resource_size_t bf_start;
882 resource_size_t bf_len;
885 bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
886 bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
887 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
888 if (!priv->bf_mapping)
894 static void unmap_bf_area(struct mlx4_dev *dev)
896 if (mlx4_priv(dev)->bf_mapping)
897 io_mapping_free(mlx4_priv(dev)->bf_mapping);
900 static void mlx4_close_hca(struct mlx4_dev *dev)
903 mlx4_CLOSE_HCA(dev, 0);
906 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
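/*
 * Bring the HCA up: QUERY_FW, load and start the firmware, apply
 * MOD_STAT_CFG, read device capabilities, build the resource profile
 * (including any saved per-port configuration), map ICM, issue
 * INIT_HCA, and finally QUERY_ADAPTER for board identification.
 */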
909 static int mlx4_init_hca(struct mlx4_dev *dev)
911 struct mlx4_priv *priv = mlx4_priv(dev);
912 struct mlx4_adapter adapter;
913 struct mlx4_dev_cap dev_cap;
914 struct mlx4_mod_stat_cfg mlx4_cfg;
915 struct mlx4_profile profile;
916 struct mlx4_init_hca_param init_hca;
917 struct mlx4_port_config *config;
922 err = mlx4_QUERY_FW(dev);
925 mlx4_info(dev, "non-primary physical function, skipping.\n");
927 mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
931 err = mlx4_load_fw(dev);
933 mlx4_err(dev, "Failed to start FW, aborting.\n");
937 mlx4_cfg.log_pg_sz_m = 1;
938 mlx4_cfg.log_pg_sz = 0;
939 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
941 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
943 err = mlx4_dev_cap(dev, &dev_cap);
945 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
949 process_mod_param_profile();
950 profile = default_profile;
952 list_for_each_entry(config, &config_list, list) {
953 if (config->pdev == dev->pdev) {
954 for (i = 1; i <= dev->caps.num_ports; i++) {
955 dev->caps.possible_type[i] = config->port_type[i];
956 if (config->port_type[i] != MLX4_PORT_TYPE_AUTO)
957 dev->caps.port_type[i] = config->port_type[i];
962 mlx4_set_port_mask(dev);
963 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
964 if ((long long) icm_size < 0) {
969 if (map_bf_area(dev))
970 mlx4_dbg(dev, "Kernel support for blue flame is not available for kernels < 2.6.28\n");
972 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
974 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
978 err = mlx4_INIT_HCA(dev, &init_hca);
980 mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
984 err = mlx4_QUERY_ADAPTER(dev, &adapter);
986 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
990 priv->eq_table.inta_pin = adapter.inta_pin;
991 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
996 mlx4_CLOSE_HCA(dev, 0);
1004 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
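/*
 * The counters bitmap is sized according to the negotiated counters
 * mode (basic or extended); when counters are disabled the table is
 * skipped entirely.
 */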
1009 static int mlx4_init_counters_table(struct mlx4_dev *dev)
1011 struct mlx4_priv *priv = mlx4_priv(dev);
1015 switch (dev->caps.counters_mode) {
1016 case MLX4_CUNTERS_BASIC:
1017 nent = dev->caps.max_basic_counters;
1019 case MLX4_CUNTERS_EXT:
1020 nent = dev->caps.max_ext_counters;
1025 err = mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
1032 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
1034 switch (dev->caps.counters_mode) {
1035 case MLX4_CUNTERS_BASIC:
1036 case MLX4_CUNTERS_EXT:
1037 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
1044 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
1046 struct mlx4_priv *priv = mlx4_priv(dev);
1048 switch (dev->caps.counters_mode) {
1049 case MLX4_CUNTERS_BASIC:
1050 case MLX4_CUNTERS_EXT:
1051 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
1059 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
1061 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1063 switch (dev->caps.counters_mode) {
1064 case MLX4_CUNTERS_BASIC:
1065 case MLX4_CUNTERS_EXT:
1066 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
1072 EXPORT_SYMBOL_GPL(mlx4_counter_free);
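/*
 * Set up all software resource tables in dependency order (UAR, PD,
 * XRCD, MR, EQ, CQ, SRQ, QP, MCG, counters), switch firmware commands
 * to event mode, verify interrupt delivery with a NOP command, and
 * program each port's capabilities with SET_PORT.
 */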
1074 static int mlx4_setup_hca(struct mlx4_dev *dev)
1076 struct mlx4_priv *priv = mlx4_priv(dev);
1079 __be32 ib_port_default_caps;
1081 err = mlx4_init_uar_table(dev);
1083 mlx4_err(dev, "Failed to initialize "
1084 "user access region table, aborting.\n");
1088 err = mlx4_uar_alloc(dev, &priv->driver_uar);
1090 mlx4_err(dev, "Failed to allocate driver access region, "
1092 goto err_uar_table_free;
1095 priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
1097 mlx4_err(dev, "Couldn't map kernel access region, "
1103 err = mlx4_init_pd_table(dev);
1105 mlx4_err(dev, "Failed to initialize "
1106 "protection domain table, aborting.\n");
1110 err = mlx4_init_xrcd_table(dev);
1112 mlx4_err(dev, "Failed to initialize extended "
1113 "reliably connected domain table, aborting.\n");
1114 goto err_pd_table_free;
1117 err = mlx4_init_mr_table(dev);
1119 mlx4_err(dev, "Failed to initialize "
1120 "memory region table, aborting.\n");
1121 goto err_xrcd_table_free;
1124 err = mlx4_init_eq_table(dev);
1126 mlx4_err(dev, "Failed to initialize "
1127 "event queue table, aborting.\n");
1128 goto err_mr_table_free;
1131 err = mlx4_cmd_use_events(dev);
1133 mlx4_err(dev, "Failed to switch to event-driven "
1134 "firmware commands, aborting.\n");
1135 goto err_eq_table_free;
1138 err = mlx4_NOP(dev);
1140 if (dev->flags & MLX4_FLAG_MSI_X) {
1141 mlx4_warn(dev, "NOP command failed to generate MSI-X "
1142 "interrupt IRQ %d).\n",
1143 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1144 mlx4_warn(dev, "Trying again without MSI-X.\n");
1146 mlx4_err(dev, "NOP command failed to generate interrupt "
1147 "(IRQ %d), aborting.\n",
1148 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1149 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
1155 mlx4_dbg(dev, "NOP command IRQ test passed\n");
1157 err = mlx4_init_cq_table(dev);
1159 mlx4_err(dev, "Failed to initialize "
1160 "completion queue table, aborting.\n");
1164 err = mlx4_init_srq_table(dev);
1166 mlx4_err(dev, "Failed to initialize "
1167 "shared receive queue table, aborting.\n");
1168 goto err_cq_table_free;
1171 err = mlx4_init_qp_table(dev);
1173 mlx4_err(dev, "Failed to initialize "
1174 "queue pair table, aborting.\n");
1175 goto err_srq_table_free;
1178 err = mlx4_init_mcg_table(dev);
1180 mlx4_err(dev, "Failed to initialize "
1181 "multicast group table, aborting.\n");
1182 goto err_qp_table_free;
1185 err = mlx4_init_counters_table(dev);
1186 if (err && err != -ENOENT) {
1187 mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
1188 goto err_mcg_table_free;
1191 for (port = 1; port <= dev->caps.num_ports; port++) {
1192 ib_port_default_caps = 0;
1193 err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
1195 mlx4_warn(dev, "failed to get port %d default "
1196 "ib capabilities (%d). Continuing with "
1197 "caps = 0\n", port, err);
1198 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
1199 err = mlx4_SET_PORT(dev, port);
1201 mlx4_err(dev, "Failed to set port %d, aborting\n",
1203 goto err_counters_table_free;
1209 err_counters_table_free:
1210 mlx4_cleanup_counters_table(dev);
1213 mlx4_cleanup_mcg_table(dev);
1216 mlx4_cleanup_qp_table(dev);
1219 mlx4_cleanup_srq_table(dev);
1222 mlx4_cleanup_cq_table(dev);
1225 mlx4_cmd_use_polling(dev);
1228 mlx4_cleanup_eq_table(dev);
1231 mlx4_cleanup_mr_table(dev);
1233 err_xrcd_table_free:
1234 mlx4_cleanup_xrcd_table(dev);
1237 mlx4_cleanup_pd_table(dev);
1243 mlx4_uar_free(dev, &priv->driver_uar);
1246 mlx4_cleanup_uar_table(dev);
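/*
 * Try to enable MSI-X with one completion vector per possible CPU plus
 * one for async events, bounded by the number of available EQs; if that
 * fails, fall back to a single legacy interrupt shared by both EQs.
 */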
1250 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1252 struct mlx4_priv *priv = mlx4_priv(dev);
1253 struct msix_entry *entries;
1259 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
1260 num_possible_cpus() + 1);
1261 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
1265 for (i = 0; i < nreq; ++i)
1266 entries[i].entry = i;
1269 err = pci_enable_msix(dev->pdev, entries, nreq);
1271 /* Try again if at least 2 vectors are available */
1273 mlx4_info(dev, "Requested %d vectors, "
1274 "but only %d MSI-X vectors available, "
1275 "trying again\n", nreq, err);
1283 dev->caps.num_comp_vectors = nreq - 1;
1284 for (i = 0; i < nreq; ++i)
1285 priv->eq_table.eq[i].irq = entries[i].vector;
1287 dev->flags |= MLX4_FLAG_MSI_X;
1294 dev->caps.num_comp_vectors = 1;
1296 for (i = 0; i < 2; ++i)
1297 priv->eq_table.eq[i].irq = dev->pdev->irq;
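/*
 * Per-port initialization: MAC and VLAN tables plus the mlx4_portN
 * sysfs attribute that reports and changes the port type.
 */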
1300 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
1302 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
1307 mlx4_init_mac_table(dev, &info->mac_table);
1308 mlx4_init_vlan_table(dev, &info->vlan_table);
1310 sprintf(info->dev_name, "mlx4_port%d", port);
1311 info->port_attr.attr.name = info->dev_name;
1312 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
1313 info->port_attr.show = show_port_type;
1314 info->port_attr.store = set_port_type;
1316 err = device_create_file(&dev->pdev->dev, &info->port_attr);
1318 mlx4_err(dev, "Failed to create file for port %d\n", port);
1325 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
1330 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
1333 static int mlx4_init_trigger(struct mlx4_priv *priv)
1335 memcpy(&priv->trigger_attr, &dev_attr_port_trigger,
1336 sizeof(struct device_attribute));
1337 return device_create_file(&priv->dev.pdev->dev, &priv->trigger_attr);
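/*
 * PCI probe path: enable the device, validate BARs 0 (DCS) and 2 (UAR),
 * set the DMA masks, reset the HCA (a boot ROM may have left it in an
 * undefined state), initialize the command interface, bring up the HCA,
 * enable MSI-X if possible, set up resource tables and ports, and
 * register the device with the protocol drivers.
 */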
1340 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1342 struct mlx4_priv *priv;
1343 struct mlx4_dev *dev;
1348 printk(KERN_INFO PFX "Initializing %s\n",
1351 err = pci_enable_device(pdev);
1353 dev_err(&pdev->dev, "Cannot enable PCI device, "
1359 * Check for BARs. We expect 0: 1MB
1361 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
1362 pci_resource_len(pdev, 0) != 1 << 20) {
1363 dev_err(&pdev->dev, "Missing DCS, aborting.\n");
1365 goto err_disable_pdev;
1367 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
1368 dev_err(&pdev->dev, "Missing UAR, aborting.\n");
1370 goto err_disable_pdev;
1373 err = pci_request_region(pdev, 0, DRV_NAME);
1375 dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
1376 goto err_disable_pdev;
1379 err = pci_request_region(pdev, 2, DRV_NAME);
1381 dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
1382 goto err_release_bar0;
1385 pci_set_master(pdev);
1387 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1389 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
1390 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1392 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
1393 goto err_release_bar2;
1396 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1398 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
1399 "consistent PCI DMA mask.\n");
1400 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1402 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
1404 goto err_release_bar2;
1408 priv = kzalloc(sizeof *priv, GFP_KERNEL);
1410 dev_err(&pdev->dev, "Device struct alloc failed, "
1413 goto err_release_bar2;
1418 INIT_LIST_HEAD(&priv->ctx_list);
1419 spin_lock_init(&priv->ctx_lock);
1421 mutex_init(&priv->port_mutex);
1423 INIT_LIST_HEAD(&priv->pgdir_list);
1424 mutex_init(&priv->pgdir_mutex);
1425 for (i = 0; i < MLX4_MAX_PORTS; ++i)
1426 priv->iboe_counter_index[i] = -1;
1428 INIT_LIST_HEAD(&priv->bf_list);
1429 mutex_init(&priv->bf_mutex);
1432 * Now reset the HCA before we touch the PCI capabilities or
1433 * attempt a firmware command, since a boot ROM may have left
1434 * the HCA in an undefined state.
1436 err = mlx4_reset(dev);
1438 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
1442 if (mlx4_cmd_init(dev)) {
1443 mlx4_err(dev, "Failed to init command interface, aborting.\n");
1447 err = mlx4_init_hca(dev);
1451 err = mlx4_alloc_eq_table(dev);
1455 mlx4_enable_msi_x(dev);
1457 err = mlx4_setup_hca(dev);
1458 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
1459 dev->flags &= ~MLX4_FLAG_MSI_X;
1460 pci_disable_msix(pdev);
1461 err = mlx4_setup_hca(dev);
1467 for (port = 1; port <= dev->caps.num_ports; port++) {
1468 err = mlx4_init_port_info(dev, port);
1473 err = mlx4_register_device(dev);
1477 err = mlx4_init_trigger(priv);
1481 err = mlx4_sense_init(dev);
1485 mlx4_start_sense(dev);
1487 pci_set_drvdata(pdev, dev);
1492 device_remove_file(&dev->pdev->dev, &priv->trigger_attr);
1494 mlx4_unregister_device(dev);
1496 for (--port; port >= 1; --port)
1497 mlx4_cleanup_port_info(&priv->port[port]);
1499 mlx4_cleanup_counters_table(dev);
1500 mlx4_cleanup_mcg_table(dev);
1501 mlx4_cleanup_qp_table(dev);
1502 mlx4_cleanup_srq_table(dev);
1503 mlx4_cleanup_cq_table(dev);
1504 mlx4_cmd_use_polling(dev);
1505 mlx4_cleanup_eq_table(dev);
1506 mlx4_cleanup_mr_table(dev);
1507 mlx4_cleanup_xrcd_table(dev);
1508 mlx4_cleanup_pd_table(dev);
1509 mlx4_cleanup_uar_table(dev);
1512 mlx4_free_eq_table(dev);
1515 if (dev->flags & MLX4_FLAG_MSI_X)
1516 pci_disable_msix(pdev);
1518 mlx4_close_hca(dev);
1521 mlx4_cmd_cleanup(dev);
1527 pci_release_region(pdev, 2);
1530 pci_release_region(pdev, 0);
1533 pci_disable_device(pdev);
1534 pci_set_drvdata(pdev, NULL);
1538 static int __devinit mlx4_init_one(struct pci_dev *pdev,
1539 const struct pci_device_id *id)
1541 static int mlx4_version_printed;
1543 if (!mlx4_version_printed) {
1544 printk(KERN_INFO "%s", mlx4_version);
1545 ++mlx4_version_printed;
1548 return __mlx4_init_one(pdev, id);
1551 static void mlx4_remove_one(struct pci_dev *pdev)
1553 struct mlx4_dev *dev = pci_get_drvdata(pdev);
1554 struct mlx4_priv *priv = mlx4_priv(dev);
1558 mlx4_sense_cleanup(dev);
1559 mlx4_unregister_device(dev);
1560 device_remove_file(&dev->pdev->dev, &priv->trigger_attr);
1562 for (p = 1; p <= dev->caps.num_ports; p++) {
1563 mlx4_cleanup_port_info(&priv->port[p]);
1564 mlx4_CLOSE_PORT(dev, p);
1567 mlx4_cleanup_counters_table(dev);
1568 mlx4_cleanup_mcg_table(dev);
1569 mlx4_cleanup_qp_table(dev);
1570 mlx4_cleanup_srq_table(dev);
1571 mlx4_cleanup_cq_table(dev);
1572 mlx4_cmd_use_polling(dev);
1573 mlx4_cleanup_eq_table(dev);
1574 mlx4_cleanup_mr_table(dev);
1575 mlx4_cleanup_xrcd_table(dev);
1576 mlx4_cleanup_pd_table(dev);
1579 mlx4_uar_free(dev, &priv->driver_uar);
1580 mlx4_cleanup_uar_table(dev);
1581 mlx4_free_eq_table(dev);
1582 mlx4_close_hca(dev);
1583 mlx4_cmd_cleanup(dev);
1585 if (dev->flags & MLX4_FLAG_MSI_X)
1586 pci_disable_msix(pdev);
1589 pci_release_region(pdev, 2);
1590 pci_release_region(pdev, 0);
1591 pci_disable_device(pdev);
1592 pci_set_drvdata(pdev, NULL);
1596 int mlx4_restart_one(struct pci_dev *pdev)
1598 mlx4_remove_one(pdev);
1599 return __mlx4_init_one(pdev, NULL);
1602 static struct pci_device_id mlx4_pci_table[] = {
1603 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
1604 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
1605 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
1606 { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
1607 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
1608 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
1609 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
1610 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
1611 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
1612 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2 */
1613 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX VPI PCIe 2.0 5GT/s - IB QDR / 10GigE Virt+ */
1614 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX EN 40GigE PCIe 2.0 5GT/s */
1615 { PCI_VDEVICE(MELLANOX, 0x6778) }, /* MT26488 ConnectX VPI PCIe 2.0 5GT/s - IB DDR / 10GigE Virt+ */
1616 { PCI_VDEVICE(MELLANOX, 0x1000) },
1617 { PCI_VDEVICE(MELLANOX, 0x1001) },
1618 { PCI_VDEVICE(MELLANOX, 0x1002) },
1619 { PCI_VDEVICE(MELLANOX, 0x1003) },
1620 { PCI_VDEVICE(MELLANOX, 0x1004) },
1621 { PCI_VDEVICE(MELLANOX, 0x1005) },
1622 { PCI_VDEVICE(MELLANOX, 0x1006) },
1623 { PCI_VDEVICE(MELLANOX, 0x1007) },
1624 { PCI_VDEVICE(MELLANOX, 0x1008) },
1625 { PCI_VDEVICE(MELLANOX, 0x1009) },
1626 { PCI_VDEVICE(MELLANOX, 0x100a) },
1627 { PCI_VDEVICE(MELLANOX, 0x100b) },
1628 { PCI_VDEVICE(MELLANOX, 0x100c) },
1629 { PCI_VDEVICE(MELLANOX, 0x100d) },
1630 { PCI_VDEVICE(MELLANOX, 0x100e) },
1631 { PCI_VDEVICE(MELLANOX, 0x100f) },
1635 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
1637 static struct pci_driver mlx4_driver = {
1639 .id_table = mlx4_pci_table,
1640 .probe = mlx4_init_one,
1641 .remove = __devexit_p(mlx4_remove_one)
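/*
 * Sanity-check module parameters before registering the PCI driver;
 * log_mtts_per_seg defaults to ilog2(MLX4_MTT_ENTRY_PER_SEG) when left
 * at zero.
 */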
1644 static int __init mlx4_verify_params(void)
1646 if ((log_num_mac < 0) || (log_num_mac > 7)) {
1647 printk(KERN_WARNING "mlx4_core: bad log_num_mac: %d\n", log_num_mac);
1651 if (log_mtts_per_seg == 0)
1652 log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
1653 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
1654 printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
1661 static int __init mlx4_init(void)
1665 mutex_init(&drv_mutex);
1667 if (mlx4_verify_params())
1672 mlx4_wq = create_singlethread_workqueue("mlx4");
1676 ret = pci_register_driver(&mlx4_driver);
1677 return ret < 0 ? ret : 0;
1680 static void __exit mlx4_cleanup(void)
1682 mutex_lock(&drv_mutex);
1683 mlx4_config_cleanup();
1684 pci_unregister_driver(&mlx4_driver);
1685 mutex_unlock(&drv_mutex);
1686 destroy_workqueue(mlx4_wq);
1689 module_init_order(mlx4_init, SI_ORDER_MIDDLE);
1690 module_exit(mlx4_cleanup);
1692 #undef MODULE_VERSION
1693 #include <sys/module.h>
1695 mlx4_evhand(module_t mod, int event, void *arg)
1700 static moduledata_t mlx4_mod = {
1702 .evhand = mlx4_evhand,
1704 MODULE_VERSION(mlx4, 1);
1705 DECLARE_MODULE(mlx4, mlx4_mod, SI_SUB_SMP, SI_ORDER_ANY);