 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/cache.h>
#include <linux/random.h>

#include <dev/mlx4/device.h>
#include <dev/mlx4/doorbell.h>
#include <dev/mlx4/stats.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#else /* CONFIG_PCI_MSI */
#endif /* CONFIG_PCI_MSI */
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of VFs to probe by PF driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the"
		 " number of QPs per MCG, for example:"
		 " 10 gives 248. Range: 7 <="
		 " log_num_mgm_entry_size <= 12."
		 " To activate device managed"
		 " flow steering when available, set to -1");
static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

static bool enable_4k_uar;
module_param(enable_4k_uar, bool, 0444);
MODULE_PARM_DESC(enable_4k_uar,
		 "Enable using 4K UAR. Should not be enabled if there are VFs which do not support 4K UARs (default: false)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)
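/* PF_CONTEXT_BEHAVIOUR_MASK is the full set of PF context behaviours this
 * driver understands; mlx4_slave_cap() rejects the device if the FW reports
 * any behaviour bit outside this mask (see the pf_context_behaviour check
 * below).
 */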
#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_description[] = "Mellanox driver"
	" (" DRV_VERSION ")";

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.rdmarc_per_qp	= 1 << 4,
	.num_mtt	= 1 << 20, /* It is really the number of MTT segments */
};

static struct mlx4_profile low_mem_profile = {
	.rdmarc_per_qp	= 1 << 4,

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
					      struct mlx4_dev_cap *dev_cap)
	/* reserved_uars is calculated in system-page-size units.
	 * Therefore, an adjustment is applied when the UAR page size is
	 * smaller than the system page size.
	 */
	dev->caps.reserved_uars =
		max_t(int,
		      mlx4_get_num_reserved_uar(dev),
		      dev_cap->reserved_uars /
		      (1 << (PAGE_SHIFT - dev->uar_page_shift)));
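	/* For example, with 4K UARs (uar_page_shift == 12) on a kernel using
	 * 64K system pages (PAGE_SHIFT == 16), 1 << (16 - 12) = 16 UARs fit
	 * in one system page, so the FW-reported reserved_uars count is
	 * divided by 16 and then bounded below by the driver's own minimum
	 * from mlx4_get_num_reserved_uar().
	 */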
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",

static void mlx4_set_port_mask(struct mlx4_dev *dev)
	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
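		/* On success the MLX4_QUERY_FUNC_NUM_SYS_EQS bit is folded
		 * into the otherwise-zero return value, so the caller can
		 * tell that max_eqs, reserved_eqs and reserved_uars were
		 * taken from QUERY_FUNC rather than QUERY_DEV_CAP (a sketch
		 * of the convention, based on how the flag is declared
		 * above).
		 */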
static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW does not support it, or it was cancelled by the user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))

	/* Must have 64B CQE/EQE enabled by FW to use the bigger stride.
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs.
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride, cache line size supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cache line size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
	dev->caps.vl_cap[port] = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port] = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
	dev->caps.max_tc_eth = port_cap->max_tc_eth;
	dev->caps.def_mac[port] = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port] = port_cap->trans_type;
	dev->caps.vendor_oui[port] = port_cap->vendor_oui;
	dev->caps.wavelength[port] = port_cap->wavelength;
	dev->caps.trans_code[port] = port_cap->trans_code;

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
	err = mlx4_QUERY_PORT(dev, port, port_cap);

		mlx4_err(dev, "QUERY_PORT command failed.\n");
static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;

#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");

	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, (long)PAGE_SIZE);
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size, (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));

	dev->caps.num_ports = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
				   dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
			      dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
	/* Save uar page shift */
	if (!mlx4_is_slave(dev)) {
		/* A virtual PCI function needs to determine the UAR page size
		 * from firmware.  Only the master PCI function can set the
		 * UAR page size.
		 */
			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
			dev->uar_page_shift = PAGE_SHIFT;

		mlx4_set_num_reserved_uars(dev, dev_cap);

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
		struct mlx4_init_hca_param hca_param;

		memset(&hca_param, 0, sizeof(hca_param));
		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off the PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and
		 * verifies that the phv bit was reported correctly in the
		 * WQE. To allow QinQ, the PHV_EN flag should be set and
		 * phv_check_en must be cleared; otherwise QinQ packets will
		 * be dropped by the HW.
		 */
		if (err || hca_param.phv_check_en)
			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;

				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
					dev->caps.port_type[i] = port_type_array[i - 1];

		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If the "default_sense" bit is set, we move the port to
		 * "AUTO" mode and perform the sense_port FW command to try
		 * and set the correct port type from the beginning.
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
			dev->caps.possible_type[i] = dev->caps.port_type[i];

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too high for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too high for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
		mlx4_warn(dev,
			  "Granular QoS per VF not supported with IB/Eth configuration\n");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;

	dev->caps.max_counters = dev_cap->max_counters;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;

	dev->caps.rl_caps = dev_cap->rl_caps;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			mlx4_warn(dev, "Old device ETS support detected\n");
			mlx4_warn(dev, "Consider upgrading device FW.\n");
			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;

		dev->caps.alloc_res_qp_mask = 0;

	mlx4_enable_ignore_fcs(dev);
static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
	u32 lnkcap1, lnkcap2;

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;

	*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
	if (!lnkcap2) { /* pre-r3.0 */
		if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
		       err2 ? err2 : -EINVAL;

static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");

	err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
/* The function checks whether there are live VFs and returns their count. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;

	for (i = 1 /* the PPF is slave 0 */; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
		qk += qpn - dev->phys_caps.base_proxy_sqpn;

EXPORT_SYMBOL(mlx4_get_parav_qkey);
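/* A note on the mapping above (a sketch, not normative): special QPs in the
 * proxy/tunnel SQPN ranges are assigned well-known qkeys at a fixed offset
 * from MLX4_RESERVED_QKEY_BASE, determined by the QP's position within its
 * range, so master and slaves can derive the same qkey independently.
 */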
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))

	priv->virt2phys_pkey[slave][port - 1][i] = val;
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))

	priv->slave_node_guids[slave] = guid;
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))

	return priv->slave_node_guids[slave];
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
EXPORT_SYMBOL(mlx4_is_slave_active);
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));

static int mlx4_slave_cap(struct mlx4_dev *dev)
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");

	/* Fail if the HCA has an unknown global capability; at this time,
	 * global_caps should always be zero.
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");

	err = mlx4_QUERY_FW(dev);
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
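	/* page_size_cap was stored by mlx4_dev_cap() as ~(min_page_sz - 1),
	 * i.e. a mask with all bits below the minimum page size cleared, so
	 * the two's-complement identity ~cap + 1 recovers the HCA's minimum
	 * page size from it.
	 */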
	mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, (long)PAGE_SIZE);

	/* Set uar_page_shift for VF */
	dev->uar_page_shift = hca_param.uar_page_sz + 12;

	/* Make sure the master uar page size is valid */
	if (dev->uar_page_shift > PAGE_SHIFT) {
		mlx4_err(dev,
			 "Invalid configuration: uar page size is larger than system page size\n");

	/* Set reserved_uars based on the uar_page_shift */
	mlx4_set_num_reserved_uars(dev, &dev_cap);

	/* Although the uar page size in FW differs from the system page size,
	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
	 * still work with the assumption that uar page size == system page
	 * size.
	 */
	dev->caps.uar_page_size = PAGE_SIZE;
	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);

	dev->caps.num_ports = func_cap.num_ports;
	dev->quotas.qp = func_cap.qp_quota;
	dev->quotas.srq = func_cap.srq_quota;
	dev->quotas.cq = func_cap.cq_quota;
	dev->quotas.mpt = func_cap.mpt_quota;
	dev->quotas.mtt = func_cap.mtt_quota;
	dev->caps.num_qps = 1 << hca_param.log_num_qps;
	dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.reserved_lkey = func_cap.reserved_lkey;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);

	mlx4_replace_zero_macs(dev);

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",

		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						      &dev->caps.gid_table_len[i],
						      &dev->caps.pkey_table_len[i]);

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
		dev->caps.cqe_size = 32;

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still needs to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
		 hca_param.rss_ip_frags ? "on" : "off");

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

static void mlx4_request_modules(struct mlx4_dev *dev)
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])

		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
				mlx4_err(dev, "Failed to set port %d, aborting\n",
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
			mlx4_err(dev, "Failed to register device\n");
		mlx4_request_modules(dev);

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
	struct mlx4_dev *mdev = info->dev;

		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
		sprintf(buf, "%s\n", type);
static int __set_port_type(struct mlx4_port_info *info,
			   enum mlx4_port_type port_type)
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];

	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
		mlx4_err(mdev,
			 "Requested port type for port %d is not supported on this HCA\n",

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	info->tmp_type = port_type;

	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];

			mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
	struct mlx4_dev *mdev = info->dev;
	enum mlx4_port_type port_type;
	static DEFINE_MUTEX(set_port_type_mutex);

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n")) {
		port_type = MLX4_PORT_TYPE_IB;
	} else if (!strcmp(buf, "eth\n")) {
		port_type = MLX4_PORT_TYPE_ETH;
	} else if (!strcmp(buf, "auto\n")) {
		port_type = MLX4_PORT_TYPE_AUTO;
		mlx4_err(mdev, "%s is not a supported port type\n", buf);

	err = __set_port_type(info, port_type);

	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
static inline int int_to_ibta_mtu(int mtu)
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
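/* These two helpers are inverses of each other: one maps a byte MTU to the
 * IBTA MTU enumeration and the other maps it back.  The sysfs handlers below
 * use them to translate between user-visible byte values and
 * mdev->caps.port_ib_mtu[]; values outside the table fall through
 * (presumably to an elided default case returning -1, which is how
 * set_port_ib_mtu() detects an invalid MTU).
 */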
static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	err = kstrtoint(buf, 0, &mtu);
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA MTU\n", buf);

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
			mlx4_err(mdev, "Failed to set port %d, aborting\n",

	err = mlx4_register_device(mdev);

	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
	struct mlx4_slaves_pport slaves_port1;
	struct mlx4_slaves_pport slaves_port2;
	DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);

	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
	bitmap_and(slaves_port_1_2,
		   slaves_port1.slaves, slaves_port2.slaves,
		   dev->persist->num_vfs + 1);

	/* only single-port VFs are allowed */
	if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");

	/* The number of virtual functions is the total number of functions
	 * minus one physical function for each port.
	 */
	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
	       bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;

	/* limit on maximum allowed VFs */
	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
		mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");

	err = mlx4_bond_mac_table(dev);
	err = mlx4_bond_vlan_table(dev);
	err = mlx4_bond_fs_rules(dev);

	(void)mlx4_unbond_vlan_table(dev);
	(void)mlx4_unbond_mac_table(dev);
static int mlx4_mf_unbond(struct mlx4_dev *dev)
	ret = mlx4_unbond_fs_rules(dev);
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
	ret1 = mlx4_unbond_mac_table(dev);
		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);

	ret1 = mlx4_unbond_vlan_table(dev);
		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
int mlx4_bond(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev)) {
		ret = mlx4_do_bond(dev, true);
			mlx4_err(dev, "Failed to bond device: %d\n", ret);
		if (!ret && mlx4_is_master(dev)) {
			ret = mlx4_mf_bond(dev);
				mlx4_err(dev, "bond for multifunction failed\n");
				mlx4_do_bond(dev, false);

	mutex_unlock(&priv->bond_mutex);

	mlx4_dbg(dev, "Device is bonded\n");

EXPORT_SYMBOL_GPL(mlx4_bond);

int mlx4_unbond(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev)) {

		ret = mlx4_do_bond(dev, false);
			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
		if (mlx4_is_master(dev))
			ret2 = mlx4_mf_unbond(dev);
			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);

	mutex_unlock(&priv->bond_mutex);

	mlx4_dbg(dev, "Device is unbonded\n");

EXPORT_SYMBOL_GPL(mlx4_unbond);
int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
		port1 = priv->v2p.port1;
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides the boundary checks, cross mapping makes
		 * no sense and therefore is not allowed */
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
			mlx4_err(dev, "Failed to change port map: %d\n", err);

	mutex_unlock(&priv->bond_mutex);
EXPORT_SYMBOL_GPL(mlx4_port_map_set);
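/* A minimal usage sketch (hypothetical caller, assuming a dual-port device
 * with MLX4_DEV_CAP_FLAG2_PORT_REMAP set):
 *
 *	struct mlx4_port_map v2p = { .port1 = 1, .port2 = 2 };
 *
 *	err = mlx4_port_map_set(dev, &v2p);
 *
 * Passing 0 in either field keeps that side of the current mapping, and the
 * cross mapping {2, 1} is rejected as noted above.
 */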
static int mlx4_load_fw(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
		mlx4_err(dev, "MAP_FA command failed, aborting\n");

	err = mlx4_RUN_FW(dev);
		mlx4_err(dev, "RUN_FW command failed, aborting\n");

	mlx4_free_icm(dev, priv->fw.fw_icm, 0);

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
	struct mlx4_priv *priv = mlx4_priv(dev);

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);

	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
	struct mlx4_priv *priv = mlx4_priv(dev);

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  dev->caps.mtt_entry_sz,
				  dev->caps.reserved_mtts, 1, 0);
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.reserved_mrws, 1, 1);
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  dev_cap->qpc_entry_sz,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  dev_cap->cqc_entry_sz,
				  dev->caps.reserved_cqs, 0, 0);
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.reserved_srqs, 0, 0);
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1703 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1708 * For flow steering device managed mode it is required to use
1709 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1710 * required, but for simplicity just map the whole multicast
1711 * group table now. The table isn't very big and it's a lot
1712 * easier than trying to track ref counts.
1714 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1716 mlx4_get_mgm_entry_size(dev),
1717 dev->caps.num_mgms + dev->caps.num_amgms,
1718 dev->caps.num_mgms + dev->caps.num_amgms,
1721 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1728 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1731 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1734 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1737 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1740 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1743 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1746 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1749 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1752 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1755 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1756 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1757 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1758 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1761 mlx4_UNMAP_ICM_AUX(dev);
1764 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
static void mlx4_free_icms(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

static void mlx4_slave_exit(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);

static int map_bf_area(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;

	if (!dev->caps.bf_reg_size)

	bf_start = pci_resource_start(dev->persist->pdev, 2) +
		   (dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->persist->pdev, 2) -
		 (dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)

static void unmap_bf_area(struct mlx4_dev *dev)
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
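/* Read the 64-bit free-running HW clock, which is exposed as two 32-bit
 * big-endian MMIO words.  The hi/lo/hi read sequence below detects a carry
 * into the high word between the two reads: the loop retries (up to 10
 * times) until two consecutive high-word reads agree, so the combined
 * 64-bit value is consistent.
 */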
s64 mlx4_read_clock(struct mlx4_dev *dev)
	u32 clockhi, clocklo, clockhi1;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!priv->clock_mapping)

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles & CORE_CLOCK_MASK;
EXPORT_SYMBOL_GPL(mlx4_read_clock);


static int map_internal_clock(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->persist->pdev,
					   priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)

int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
				   struct mlx4_clock_params *params)
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (mlx4_is_slave(dev))

	params->bar = priv->fw.clock_bar;
	params->offset = priv->fw.clock_offset;
	params->size = MLX4_CLOCK_SIZE;

EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
static void unmap_internal_clock(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);

static void mlx4_close_hca(struct mlx4_dev *dev)
	sysctl_ctx_free(&dev->hw_ctx);
	unmap_internal_clock(dev);

	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);

	mlx4_CLOSE_HCA(dev, 0);
	mlx4_free_icms(dev);

static void mlx4_close_fw(struct mlx4_dev *dev)
	if (!mlx4_is_slave(dev)) {

		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
static int mlx4_comm_check_offline(struct mlx4_dev *dev)
#define COMM_CHAN_OFFLINE_OFFSET  0x09

	struct mlx4_priv *priv = mlx4_priv(dev);

	end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
	while (time_before(jiffies, end)) {
		comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
					  MLX4_COMM_CHAN_FLAGS));
		offline_bit = (comm_flags &
			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));

		/* There are cases as part of AER/Reset flow that PF needs
		 * around 100 msec to load. We therefore sleep for 100 msec
		 * to allow other tasks to make use of that CPU during this
		 * time interval.
		 */

	mlx4_err(dev, "Communication channel is offline.\n");

static void mlx4_reset_vf_support(struct mlx4_dev *dev)
#define COMM_CHAN_RST_OFFSET 0x1e

	struct mlx4_priv *priv = mlx4_priv(dev);

	comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
				 MLX4_COMM_CHAN_CAPS));
	comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));

	if (comm_rst)
		dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
static int mlx4_init_slave(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	if (mlx4_comm_check_offline(dev)) {
		mlx4_err(dev, "PF is not responsive, skipping initialization\n");

	mlx4_reset_vf_support(dev);
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
	/* If we are in the middle of FLR, the slave will retry
	 * NUM_OF_RESET_RETRIES times before giving up. */
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");

	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
			  MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))

	mutex_unlock(&priv->cmd.slave_cmd_mutex);

	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
			dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;

static int choose_log_fs_mgm_entry_size(int qp_per_entry)
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
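/* choose_log_fs_mgm_entry_size() above picks the smallest log entry size i
 * whose capacity, 4 * ((1 << i) / 16 - 2) QPs, covers qp_per_entry, and
 * returns -1 if even MLX4_MAX_MGM_LOG_ENTRY_SIZE is too small.  This is the
 * same capacity formula described for the log_num_mgm_entry_size module
 * parameter.
 */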
static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
	switch (dmfs_high_steer_mode) {
	case MLX4_STEERING_DMFS_A0_DEFAULT:
		return "default performance";

	case MLX4_STEERING_DMFS_A0_DYNAMIC:
		return "dynamic hybrid mode";

	case MLX4_STEERING_DMFS_A0_STATIC:
		return "performance optimized for limited rule configuration (static)";

	case MLX4_STEERING_DMFS_A0_DISABLE:
		return "disabled performance optimized steering";

	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
		return "performance optimized steering not supported";

	default:
		return "Unrecognized mode";

#define MLX4_DMFS_A0_STEERING			(1UL << 2)
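/* When the log_num_mgm_entry_size module parameter is negative, its absolute
 * value is interpreted as a flag bitmap rather than a size; bit 2
 * (MLX4_DMFS_A0_STEERING) requests static DMFS A0 steering, as decoded in
 * choose_steering_mode() below.
 */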
static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
	if (mlx4_log_num_mgm_entry_size <= 0) {
		if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
				mlx4_err(dev, "DMFS high rate mode not supported\n");
				dev->caps.dmfs_high_steer_mode =
					MLX4_STEERING_DMFS_A0_STATIC;

	if (mlx4_log_num_mgm_entry_size <= 0 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >=
	      (dev->persist->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
	    MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
			dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");

		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);

	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);

static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n",
		 (dev->caps.tunnel_offload_mode ==
		  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
2151 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
2154 struct mlx4_port_cap port_cap;
2156 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2159 for (i = 1; i <= dev->caps.num_ports; i++) {
2160 if (mlx4_dev_port(dev, i, &port_cap)) {
2162 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
2163 } else if ((dev->caps.dmfs_high_steer_mode !=
2164 MLX4_STEERING_DMFS_A0_DEFAULT) &&
2165 (port_cap.dmfs_optimized_state ==
2166 !!(dev->caps.dmfs_high_steer_mode ==
2167 MLX4_STEERING_DMFS_A0_DISABLE))) {
2169 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
2170 dmfs_high_rate_steering_mode_str(
2171 dev->caps.dmfs_high_steer_mode),
2172 (port_cap.dmfs_optimized_state ?
2173 "enabled" : "disabled"));
2180 static int mlx4_init_fw(struct mlx4_dev *dev)
2182 struct mlx4_mod_stat_cfg mlx4_cfg;
2185 if (!mlx4_is_slave(dev)) {
2186 err = mlx4_QUERY_FW(dev);
2189 mlx4_info(dev, "non-primary physical function, skipping\n");
2191 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
2195 err = mlx4_load_fw(dev);
2197 mlx4_err(dev, "Failed to start FW, aborting\n");
2201 mlx4_cfg.log_pg_sz_m = 1;
2202 mlx4_cfg.log_pg_sz = 0;
2203 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2205 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
2211 static int mlx4_init_hca(struct mlx4_dev *dev)
2213 struct mlx4_priv *priv = mlx4_priv(dev);
2214 struct mlx4_adapter adapter;
2215 struct mlx4_dev_cap dev_cap = {};
2216 struct mlx4_profile profile;
2217 struct mlx4_init_hca_param init_hca;
2219 struct mlx4_config_dev_params params;
2222 if (!mlx4_is_slave(dev)) {
2223 err = mlx4_dev_cap(dev, &dev_cap);
2225 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
2229 choose_steering_mode(dev, &dev_cap);
2230 choose_tunnel_offload_mode(dev, &dev_cap);
2232 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2233 mlx4_is_master(dev))
2234 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2236 err = mlx4_get_phys_port_id(dev);
2238 mlx4_err(dev, "Failed to get physical port id\n");
2240 if (mlx4_is_master(dev))
2241 mlx4_parav_master_pf_caps(dev);
2243 if (mlx4_low_memory_profile()) {
2244 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2245 profile = low_mem_profile;
2247 profile = default_profile;
2249 if (dev->caps.steering_mode ==
2250 MLX4_STEERING_MODE_DEVICE_MANAGED)
2251 profile.num_mcg = MLX4_FS_NUM_MCG;
2253 icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
2255 if ((long long) icm_size < 0) {
2260 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2262 if (enable_4k_uar) {
2263 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
2264 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
2265 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2267 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
2268 init_hca.uar_page_sz = PAGE_SHIFT - 12;
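/* uar_page_sz is encoded as log2(UAR page size) - 12, so 0 means 4KB
 * pages. With enable_4k_uar the HCA keeps 4KB UAR pages even when
 * PAGE_SIZE is larger, and log_uar_sz grows by the difference so the
 * same number of UARs is covered. */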
2271 init_hca.mw_enabled = 0;
2272 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2273 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2274 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
2276 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
2280 err = mlx4_INIT_HCA(dev, &init_hca);
2282 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
2286 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
2287 err = mlx4_query_func(dev, &dev_cap);
2289 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
2291 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
2292 dev->caps.num_eqs = dev_cap.max_eqs;
2293 dev->caps.reserved_eqs = dev_cap.reserved_eqs;
2294 dev->caps.reserved_uars = dev_cap.reserved_uars;
2299 * If timestamping (TS) is supported by the FW,
2300 * read the HCA frequency via the QUERY_HCA command.
2302 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2303 memset(&init_hca, 0, sizeof(init_hca));
2304 err = mlx4_QUERY_HCA(dev, &init_hca);
2306 mlx4_err(dev, "QUERY_HCA command failed, disabling timestamping\n");
2307 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2309 dev->caps.hca_core_clock =
2310 init_hca.hca_core_clock;
2313 /* If we got an HCA frequency of 0, disable timestamping
2314 * to avoid dividing by zero. */
2316 if (!dev->caps.hca_core_clock) {
2317 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2319 "HCA frequency is 0 - timestamping is not supported\n");
2320 } else if (map_internal_clock(dev)) {
2322 * Mapping the internal clock failed;
2323 * disable timestamping in that case.
2325 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2326 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
2330 if (dev->caps.dmfs_high_steer_mode !=
2331 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
2332 if (mlx4_validate_optimized_steering(dev))
2333 mlx4_warn(dev, "Optimized steering validation failed\n");
2335 if (dev->caps.dmfs_high_steer_mode ==
2336 MLX4_STEERING_DMFS_A0_DISABLE) {
2337 dev->caps.dmfs_high_rate_qpn_base =
2338 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2339 dev->caps.dmfs_high_rate_qpn_range =
2340 MLX4_A0_STEERING_TABLE_SIZE;
2343 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
2344 dmfs_high_rate_steering_mode_str(
2345 dev->caps.dmfs_high_steer_mode));
2348 err = mlx4_init_slave(dev);
2351 mlx4_err(dev, "Failed to initialize slave\n");
2355 err = mlx4_slave_cap(dev);
2357 mlx4_err(dev, "Failed to obtain slave caps\n");
2362 if (map_bf_area(dev))
2363 mlx4_dbg(dev, "Failed to map blue flame area\n");
2365 /* Only the master sets the ports; all the rest get them from it. */
2366 if (!mlx4_is_slave(dev))
2367 mlx4_set_port_mask(dev);
2369 err = mlx4_QUERY_ADAPTER(dev, &adapter);
2371 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
2375 /* Query CONFIG_DEV parameters */
2376 err = mlx4_config_dev_retrieval(dev, ¶ms);
2377 if (err && err != -ENOTSUPP) {
2378 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2380 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2381 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2383 priv->eq_table.inta_pin = adapter.inta_pin;
2384 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
2389 unmap_internal_clock(dev);
2392 if (mlx4_is_slave(dev)) {
2393 kfree(dev->caps.qp0_qkey);
2394 kfree(dev->caps.qp0_tunnel);
2395 kfree(dev->caps.qp0_proxy);
2396 kfree(dev->caps.qp1_tunnel);
2397 kfree(dev->caps.qp1_proxy);
2401 if (mlx4_is_slave(dev))
2402 mlx4_slave_exit(dev);
2404 mlx4_CLOSE_HCA(dev, 0);
2407 if (!mlx4_is_slave(dev))
2408 mlx4_free_icms(dev);
2413 static int mlx4_init_counters_table(struct mlx4_dev *dev)
2415 struct mlx4_priv *priv = mlx4_priv(dev);
2418 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2421 if (!dev->caps.max_counters)
2424 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
2425 /* reserve last counter index for sink counter */
2426 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
2428 nent_pow2 - dev->caps.max_counters + 1);
2431 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2433 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2436 if (!dev->caps.max_counters)
2439 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2442 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
2444 struct mlx4_priv *priv = mlx4_priv(dev);
2447 for (port = 0; port < dev->caps.num_ports; port++)
2448 if (priv->def_counter[port] != -1)
2449 mlx4_counter_free(dev, priv->def_counter[port]);
2452 static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2454 struct mlx4_priv *priv = mlx4_priv(dev);
2458 for (port = 0; port < dev->caps.num_ports; port++)
2459 priv->def_counter[port] = -1;
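/* -1 marks "no default counter" so that cleanup on a later failure
 * frees only what was actually allocated; a -ENOSPC return from the
 * allocator below still yields a usable index, the per-device sink
 * counter. */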
2461 for (port = 0; port < dev->caps.num_ports; port++) {
2462 err = mlx4_counter_alloc(dev, &idx);
2464 if (!err || err == -ENOSPC) {
2465 priv->def_counter[port] = idx;
2466 } else if (err == -ENOENT) {
2469 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2470 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2471 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2472 MLX4_SINK_COUNTER_INDEX(dev));
2475 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2476 __func__, port + 1, err);
2477 mlx4_cleanup_default_counters(dev);
2481 mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
2482 __func__, priv->def_counter[port], port + 1);
2488 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2490 struct mlx4_priv *priv = mlx4_priv(dev);
2492 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2495 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
2497 *idx = MLX4_SINK_COUNTER_INDEX(dev);
2504 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2509 if (mlx4_is_mfunc(dev)) {
2510 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
2511 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
2512 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2514 *idx = get_param_l(&out_param);
2518 return __mlx4_counter_alloc(dev, idx);
2520 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
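/* Illustrative usage sketch (not from this file): a consumer such as
 * the EN driver would typically pair the calls as
 *
 *	u32 idx;
 *	if (!mlx4_counter_alloc(dev, &idx)) {
 *		... attach idx to a QP context ...
 *		mlx4_counter_free(dev, idx);
 *	}
 */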
2522 static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
2525 struct mlx4_cmd_mailbox *if_stat_mailbox;
2527 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
2529 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
2530 if (IS_ERR(if_stat_mailbox))
2531 return PTR_ERR(if_stat_mailbox);
2533 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
2534 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
2537 mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
2541 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2543 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2546 if (idx == MLX4_SINK_COUNTER_INDEX(dev))
2549 __mlx4_clear_if_stat(dev, idx);
2551 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
2555 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2559 if (mlx4_is_mfunc(dev)) {
2560 set_param_l(&in_param, idx);
2561 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2562 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
2566 __mlx4_counter_free(dev, idx);
2568 EXPORT_SYMBOL_GPL(mlx4_counter_free);
2570 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
2572 struct mlx4_priv *priv = mlx4_priv(dev);
2574 return priv->def_counter[port - 1];
2576 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
2578 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2580 struct mlx4_priv *priv = mlx4_priv(dev);
2582 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2584 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
2586 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2588 struct mlx4_priv *priv = mlx4_priv(dev);
2590 return priv->mfunc.master.vf_admin[entry].vport[port].guid;
2592 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
2594 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2596 struct mlx4_priv *priv = mlx4_priv(dev);
2603 get_random_bytes((char *)&guid, sizeof(guid));
2604 guid &= ~(cpu_to_be64(1ULL << 56));
2605 guid |= cpu_to_be64(1ULL << 57);
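/* Viewed as an EUI-64, bit 56 of the big-endian value is the
 * group/multicast bit (cleared here) and bit 57 the universal/local
 * bit (set here), making the random GUID a unicast, locally
 * administered identifier. */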
2606 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2609 static int mlx4_setup_hca(struct mlx4_dev *dev)
2611 struct mlx4_priv *priv = mlx4_priv(dev);
2614 __be32 ib_port_default_caps;
2616 err = mlx4_init_uar_table(dev);
2618 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2622 err = mlx4_uar_alloc(dev, &priv->driver_uar);
2624 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
2625 goto err_uar_table_free;
2628 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
2630 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
2635 err = mlx4_init_pd_table(dev);
2637 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
2641 err = mlx4_init_xrcd_table(dev);
2643 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
2644 goto err_pd_table_free;
2647 err = mlx4_init_mr_table(dev);
2649 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
2650 goto err_xrcd_table_free;
2653 if (!mlx4_is_slave(dev)) {
2654 err = mlx4_init_mcg_table(dev);
2656 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
2657 goto err_mr_table_free;
2659 err = mlx4_config_mad_demux(dev);
2661 mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2662 goto err_mcg_table_free;
2666 err = mlx4_init_eq_table(dev);
2668 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
2669 goto err_mcg_table_free;
2672 err = mlx4_cmd_use_events(dev);
2674 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
2675 goto err_eq_table_free;
2678 err = mlx4_NOP(dev);
2680 if (dev->flags & MLX4_FLAG_MSI_X) {
2681 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
2682 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2683 mlx4_warn(dev, "Trying again without MSI-X\n");
2685 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
2686 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2687 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
2693 mlx4_dbg(dev, "NOP command IRQ test passed\n");
2695 err = mlx4_init_cq_table(dev);
2697 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
2701 err = mlx4_init_srq_table(dev);
2703 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
2704 goto err_cq_table_free;
2707 err = mlx4_init_qp_table(dev);
2709 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
2710 goto err_srq_table_free;
2713 if (!mlx4_is_slave(dev)) {
2714 err = mlx4_init_counters_table(dev);
2715 if (err && err != -ENOENT) {
2716 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2717 goto err_qp_table_free;
2721 err = mlx4_allocate_default_counters(dev);
2723 mlx4_err(dev, "Failed to allocate default counters, aborting\n");
2724 goto err_counters_table_free;
2727 if (!mlx4_is_slave(dev)) {
2728 for (port = 1; port <= dev->caps.num_ports; port++) {
2729 ib_port_default_caps = 0;
2730 err = mlx4_get_port_ib_caps(dev, port,
2731 &ib_port_default_caps);
2733 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
2735 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
2737 /* initialize per-slave default ib port capabilities */
2738 if (mlx4_is_master(dev)) {
2740 for (i = 0; i < dev->num_slaves; i++) {
2741 if (i == mlx4_master_func_num(dev))
2743 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
2744 ib_port_default_caps;
2748 if (mlx4_is_mfunc(dev))
2749 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2751 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
2753 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2754 dev->caps.pkey_table_len[port] : -1);
2756 mlx4_err(dev, "Failed to set port %d, aborting\n",
2758 goto err_default_counters_free;
2765 err_default_counters_free:
2766 mlx4_cleanup_default_counters(dev);
2768 err_counters_table_free:
2769 if (!mlx4_is_slave(dev))
2770 mlx4_cleanup_counters_table(dev);
2773 mlx4_cleanup_qp_table(dev);
2776 mlx4_cleanup_srq_table(dev);
2779 mlx4_cleanup_cq_table(dev);
2782 mlx4_cmd_use_polling(dev);
2785 mlx4_cleanup_eq_table(dev);
2788 if (!mlx4_is_slave(dev))
2789 mlx4_cleanup_mcg_table(dev);
2792 mlx4_cleanup_mr_table(dev);
2794 err_xrcd_table_free:
2795 mlx4_cleanup_xrcd_table(dev);
2798 mlx4_cleanup_pd_table(dev);
2804 mlx4_uar_free(dev, &priv->driver_uar);
2807 mlx4_cleanup_uar_table(dev);
2811 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
2813 int requested_cpu = 0;
2814 struct mlx4_priv *priv = mlx4_priv(dev);
2819 if (eqn > dev->caps.num_comp_vectors)
2822 for (i = 1; i < port; i++)
2823 off += mlx4_get_eqs_per_port(dev, i);
2825 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
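/* Translate the EQ number into a zero-based CPU index for this port:
 * subtract the EQs consumed by lower-numbered ports and the slot taken
 * by the asynchronous EQ when it precedes this one. */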
2827 /* Meaning EQs are shared, and this call comes from the second port */
2828 if (requested_cpu < 0)
2831 eq = &priv->eq_table.eq[eqn];
2833 eq->affinity_cpu_id = requested_cpu % num_online_cpus();
2838 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2840 struct mlx4_priv *priv = mlx4_priv(dev);
2841 struct msix_entry *entries;
2846 int nreq = dev->caps.num_ports * num_online_cpus() + 1;
2848 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
2850 if (nreq > MAX_MSIX)
2853 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
2857 for (i = 0; i < nreq; ++i)
2858 entries[i].entry = i;
2860 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2863 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
2867 /* 1 is reserved for events (asynchronous EQ) */
2868 dev->caps.num_comp_vectors = nreq - 1;
2870 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
2871 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
2872 dev->caps.num_ports);
2874 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
2875 if (i == MLX4_EQ_ASYNC)
2878 priv->eq_table.eq[i].irq =
2879 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
2881 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
2882 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2883 dev->caps.num_ports);
2884 /* We don't set affinity hint when there aren't enough EQs. */
2889 priv->eq_table.eq[i].actv_ports.ports);
2890 if (mlx4_init_affinity_hint(dev, port + 1, i))
2891 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
2894 /* We divide the EQs evenly between the two ports.
2895 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
2896 * refers to the number of EQs per port
2897 * (i.e. eqs_per_port). Theoretically, we would like to
2898 * write something like (i + 1) % eqs_per_port == 0.
2899 * However, since there's an asynchronous EQ, we have
2900 * to skip over it by comparing this condition to
2901 * !!((i + 1) > MLX4_EQ_ASYNC).
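* Worked example (illustrative, assuming two ports and
* MLX4_EQ_ASYNC == 0): with num_comp_vectors = 8, eqs_per_port is 4,
* so completion vectors 1..4 land on port 1 and vectors 5..8 on
* port 2.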
2903 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
2905 (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
2906 !!((i + 1) > MLX4_EQ_ASYNC))
2907 /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
2908 * everything is shared anyway. */
2913 dev->flags |= MLX4_FLAG_MSI_X;
2920 dev->caps.num_comp_vectors = 1;
2922 BUG_ON(MLX4_EQ_ASYNC >= 2);
2923 for (i = 0; i < 2; ++i) {
2924 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
2925 if (i != MLX4_EQ_ASYNC) {
2926 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2927 dev->caps.num_ports);
2932 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2934 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
2939 if (!mlx4_is_slave(dev)) {
2940 mlx4_init_mac_table(dev, &info->mac_table);
2941 mlx4_init_vlan_table(dev, &info->vlan_table);
2942 mlx4_init_roce_gid_table(dev, &info->gid_table);
2943 info->base_qpn = mlx4_get_base_qpn(dev, port);
2946 sprintf(info->dev_name, "mlx4_port%d", port);
2947 info->port_attr.attr.name = info->dev_name;
2948 if (mlx4_is_mfunc(dev))
2949 info->port_attr.attr.mode = S_IRUGO;
2951 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
2952 info->port_attr.store = set_port_type;
2954 info->port_attr.show = show_port_type;
2955 sysfs_attr_init(&info->port_attr.attr);
2957 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
2959 mlx4_err(dev, "Failed to create file for port %d\n", port);
2963 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
2964 info->port_mtu_attr.attr.name = info->dev_mtu_name;
2965 if (mlx4_is_mfunc(dev))
2966 info->port_mtu_attr.attr.mode = S_IRUGO;
2968 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
2969 info->port_mtu_attr.store = set_port_ib_mtu;
2971 info->port_mtu_attr.show = show_port_ib_mtu;
2972 sysfs_attr_init(&info->port_mtu_attr.attr);
2974 err = device_create_file(&dev->persist->pdev->dev,
2975 &info->port_mtu_attr);
2977 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
2978 device_remove_file(&info->dev->persist->pdev->dev,
2986 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
2991 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
2992 device_remove_file(&info->dev->persist->pdev->dev,
2993 &info->port_mtu_attr);
2994 #ifdef CONFIG_RFS_ACCEL
2995 free_irq_cpu_rmap(info->rmap);
3000 static int mlx4_init_steering(struct mlx4_dev *dev)
3002 struct mlx4_priv *priv = mlx4_priv(dev);
3003 int num_entries = dev->caps.num_ports;
3006 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
3010 for (i = 0; i < num_entries; i++)
3011 for (j = 0; j < MLX4_NUM_STEERS; j++) {
3012 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
3013 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
3018 static void mlx4_clear_steering(struct mlx4_dev *dev)
3020 struct mlx4_priv *priv = mlx4_priv(dev);
3021 struct mlx4_steer_index *entry, *tmp_entry;
3022 struct mlx4_promisc_qp *pqp, *tmp_pqp;
3023 int num_entries = dev->caps.num_ports;
3026 for (i = 0; i < num_entries; i++) {
3027 for (j = 0; j < MLX4_NUM_STEERS; j++) {
3028 list_for_each_entry_safe(pqp, tmp_pqp,
3029 &priv->steer[i].promisc_qps[j],
3031 list_del(&pqp->list);
3034 list_for_each_entry_safe(entry, tmp_entry,
3035 &priv->steer[i].steer_entries[j],
3037 list_del(&entry->list);
3038 list_for_each_entry_safe(pqp, tmp_pqp,
3041 list_del(&pqp->list);
3051 static int extended_func_num(struct pci_dev *pdev)
3053 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
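/* extended_func_num() above flattens (slot, function) into a single
 * index: with ARI-style enumeration a function's position is
 * slot * 8 + function, which the probe path compares against the
 * configured per-port VF counts to decide whether to probe a VF. */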
3056 #define MLX4_OWNER_BASE 0x8069c
3057 #define MLX4_OWNER_SIZE 4
3059 static int mlx4_get_ownership(struct mlx4_dev *dev)
3061 void __iomem *owner;
3064 if (pci_channel_offline(dev->persist->pdev))
3067 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3071 mlx4_err(dev, "Failed to obtain ownership bit\n");
3080 static void mlx4_free_ownership(struct mlx4_dev *dev)
3082 void __iomem *owner;
3084 if (pci_channel_offline(dev->persist->pdev))
3087 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3091 mlx4_err(dev, "Failed to obtain ownership bit\n");
3099 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
3100 !!((flags) & MLX4_FLAG_MASTER))
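/* A valid state has MLX4_FLAG_SRIOV and MLX4_FLAG_MASTER either both
 * set or both clear: a function that enabled SR-IOV must act as the
 * master, and a slave must not own SR-IOV. */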
3102 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
3103 u8 total_vfs, int existing_vfs, int reset_flow)
3105 u64 dev_flags = dev->flags;
3109 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
3116 atomic_inc(&pf_loading);
3117 if (dev->flags & MLX4_FLAG_SRIOV) {
3118 if (existing_vfs != total_vfs) {
3119 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different from requested (%d)\n",
3120 existing_vfs, total_vfs);
3121 total_vfs = existing_vfs;
3125 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
3126 if (NULL == dev->dev_vfs) {
3127 mlx4_err(dev, "Failed to allocate memory for VFs\n");
3131 if (!(dev->flags & MLX4_FLAG_SRIOV)) {
3132 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
3133 err = pci_enable_sriov(pdev, total_vfs);
3136 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
3140 mlx4_warn(dev, "Running in master mode\n");
3141 dev_flags |= MLX4_FLAG_SRIOV |
3143 dev_flags &= ~MLX4_FLAG_SLAVE;
3144 dev->persist->num_vfs = total_vfs;
3149 atomic_dec(&pf_loading);
3151 dev->persist->num_vfs = 0;
3152 kfree(dev->dev_vfs);
3153 dev->dev_vfs = NULL;
3154 return dev_flags & ~MLX4_FLAG_MASTER;
3158 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
3161 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
3164 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
3165 /* Check for 64 VFs, a limitation of CX2 */
3166 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
3167 requested_vfs >= 64) {
3168 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
3170 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
3175 static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3177 struct pci_dev *pdev = dev->persist->pdev;
3180 mutex_lock(&dev->persist->pci_status_mutex);
3181 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3182 err = pci_enable_device(pdev);
3184 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3186 mutex_unlock(&dev->persist->pci_status_mutex);
3191 static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3193 struct pci_dev *pdev = dev->persist->pdev;
3195 mutex_lock(&dev->persist->pci_status_mutex);
3196 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3197 pci_disable_device(pdev);
3198 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3200 mutex_unlock(&dev->persist->pci_status_mutex);
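/* pci_status, guarded by pci_status_mutex, tracks whether this driver
 * currently has the PCI device enabled, so that the probe, unload, and
 * error-recovery paths do not double-enable or double-disable it. */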
3203 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3204 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3207 struct mlx4_dev *dev;
3212 struct mlx4_dev_cap *dev_cap = NULL;
3213 int existing_vfs = 0;
3217 INIT_LIST_HEAD(&priv->ctx_list);
3218 spin_lock_init(&priv->ctx_lock);
3220 mutex_init(&priv->port_mutex);
3221 mutex_init(&priv->bond_mutex);
3223 INIT_LIST_HEAD(&priv->pgdir_list);
3224 mutex_init(&priv->pgdir_mutex);
3225 spin_lock_init(&priv->cmd.context_lock);
3227 INIT_LIST_HEAD(&priv->bf_list);
3228 mutex_init(&priv->bf_mutex);
3230 dev->rev_id = pdev->revision;
3231 dev->numa_node = dev_to_node(&pdev->dev);
3233 /* Detect if this device is a virtual function */
3234 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3235 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3236 dev->flags |= MLX4_FLAG_SLAVE;
3238 /* We reset the device and enable SRIOV only for physical
3239 * devices. Try to claim ownership on the device;
3240 * if already taken, skip -- do not allow multiple PFs */
3241 err = mlx4_get_ownership(dev);
3246 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
3251 atomic_set(&priv->opreq_count, 0);
3252 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
3255 * Now reset the HCA before we touch the PCI capabilities or
3256 * attempt a firmware command, since a boot ROM may have left
3257 * the HCA in an undefined state.
3259 err = mlx4_reset(dev);
3261 mlx4_err(dev, "Failed to reset HCA, aborting\n");
3266 dev->flags = MLX4_FLAG_MASTER;
3267 existing_vfs = pci_num_vf(pdev);
3269 dev->flags |= MLX4_FLAG_SRIOV;
3270 dev->persist->num_vfs = total_vfs;
3274 /* On load, remove any previous indication of internal error; the device is up. */
3277 dev->persist->state = MLX4_DEVICE_STATE_UP;
3280 err = mlx4_cmd_init(dev);
3282 mlx4_err(dev, "Failed to init command interface, aborting\n");
3286 /* In slave functions, the communication channel must be initialized
3287 * before posting commands. Also, init num_slaves before calling mlx4_init_fw. */
3289 if (mlx4_is_mfunc(dev)) {
3290 if (mlx4_is_master(dev)) {
3291 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
3294 dev->num_slaves = 0;
3295 err = mlx4_multi_func_init(dev);
3297 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
3303 err = mlx4_init_fw(dev);
3305 mlx4_err(dev, "Failed to init fw, aborting.\n");
3309 if (mlx4_is_master(dev)) {
3310 /* when we hit the goto slave_start below, dev_cap is already initialized */
3312 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
3319 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3321 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3325 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3328 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3329 u64 dev_flags = mlx4_enable_sriov(dev, pdev,
3334 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3335 dev->flags = dev_flags;
3336 if (!SRIOV_VALID_STATE(dev->flags)) {
3337 mlx4_err(dev, "Invalid SRIOV state\n");
3340 err = mlx4_reset(dev);
3342 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
3348 /* Legacy mode FW requires SRIOV to be enabled before
3349 * doing QUERY_DEV_CAP, since max_eq's value is different if SRIOV is enabled. */
3352 memset(dev_cap, 0, sizeof(*dev_cap));
3353 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3355 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3359 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3364 err = mlx4_init_hca(dev);
3366 if (err == -EACCES) {
3367 /* Not the primary physical function;
3368 * running in slave mode */
3369 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3370 /* We're not a PF */
3371 if (dev->flags & MLX4_FLAG_SRIOV) {
3373 pci_disable_sriov(pdev);
3374 if (mlx4_is_master(dev) && !reset_flow)
3375 atomic_dec(&pf_loading);
3376 dev->flags &= ~MLX4_FLAG_SRIOV;
3378 if (!mlx4_is_slave(dev))
3379 mlx4_free_ownership(dev);
3380 dev->flags |= MLX4_FLAG_SLAVE;
3381 dev->flags &= ~MLX4_FLAG_MASTER;
3387 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3388 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
3389 existing_vfs, reset_flow);
3391 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
3392 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
3393 dev->flags = dev_flags;
3394 err = mlx4_cmd_init(dev);
3396 /* Only VHCR is cleaned up, so we could still send FW commands. */
3399 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
3403 dev->flags = dev_flags;
3406 if (!SRIOV_VALID_STATE(dev->flags)) {
3407 mlx4_err(dev, "Invalid SRIOV state\n");
3412 /* Check whether the device is functioning at its maximum possible speed.
3413 * This call has no return code; it just warns the user if the PCI
3414 * Express device capabilities are under-satisfied by the bus.
3416 if (!mlx4_is_slave(dev))
3417 mlx4_check_pcie_caps(dev);
3419 /* In master functions, the communication channel must be initialized
3420 * after obtaining its address from fw */
3421 if (mlx4_is_master(dev)) {
3422 if (dev->caps.num_ports < 2 &&
3426 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
3427 dev->caps.num_ports);
3430 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
3433 i < sizeof(dev->persist->nvfs)/
3434 sizeof(dev->persist->nvfs[0]); i++) {
3437 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
3438 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3439 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3440 dev->caps.num_ports;
3444 /* In master functions, the communication channel
3445 * must be initialized after obtaining its address from fw
3447 err = mlx4_multi_func_init(dev);
3449 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3454 err = mlx4_alloc_eq_table(dev);
3456 goto err_master_mfunc;
3458 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
3459 mutex_init(&priv->msix_ctl.pool_lock);
3461 mlx4_enable_msi_x(dev);
3462 if ((mlx4_is_mfunc(dev)) &&
3463 !(dev->flags & MLX4_FLAG_MSI_X)) {
3465 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
3469 if (!mlx4_is_slave(dev)) {
3470 err = mlx4_init_steering(dev);
3472 goto err_disable_msix;
3475 mlx4_init_quotas(dev);
3477 err = mlx4_setup_hca(dev);
3478 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3479 !mlx4_is_mfunc(dev)) {
3480 dev->flags &= ~MLX4_FLAG_MSI_X;
3481 dev->caps.num_comp_vectors = 1;
3482 pci_disable_msix(pdev);
3483 err = mlx4_setup_hca(dev);
3489 /* When PF resources are ready, arm its comm channel to enable getting commands. */
3492 if (mlx4_is_master(dev)) {
3493 err = mlx4_ARM_COMM_CHANNEL(dev);
3495 mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
3501 for (port = 1; port <= dev->caps.num_ports; port++) {
3502 err = mlx4_init_port_info(dev, port);
3507 priv->v2p.port1 = 1;
3508 priv->v2p.port2 = 2;
3510 err = mlx4_register_device(dev);
3514 mlx4_request_modules(dev);
3516 mlx4_sense_init(dev);
3517 mlx4_start_sense(dev);
3521 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3522 atomic_dec(&pf_loading);
3528 for (--port; port >= 1; --port)
3529 mlx4_cleanup_port_info(&priv->port[port]);
3531 mlx4_cleanup_counters_table(dev);
3532 mlx4_cleanup_qp_table(dev);
3533 mlx4_cleanup_srq_table(dev);
3534 mlx4_cleanup_cq_table(dev);
3535 mlx4_cmd_use_polling(dev);
3536 mlx4_cleanup_eq_table(dev);
3537 mlx4_cleanup_mcg_table(dev);
3538 mlx4_cleanup_mr_table(dev);
3539 mlx4_cleanup_xrcd_table(dev);
3540 mlx4_cleanup_pd_table(dev);
3541 mlx4_cleanup_uar_table(dev);
3544 if (!mlx4_is_slave(dev))
3545 mlx4_clear_steering(dev);
3548 if (dev->flags & MLX4_FLAG_MSI_X)
3549 pci_disable_msix(pdev);
3552 mlx4_free_eq_table(dev);
3555 if (mlx4_is_master(dev)) {
3556 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
3557 mlx4_multi_func_cleanup(dev);
3560 if (mlx4_is_slave(dev)) {
3561 kfree(dev->caps.qp0_qkey);
3562 kfree(dev->caps.qp0_tunnel);
3563 kfree(dev->caps.qp0_proxy);
3564 kfree(dev->caps.qp1_tunnel);
3565 kfree(dev->caps.qp1_proxy);
3569 mlx4_close_hca(dev);
3575 if (mlx4_is_slave(dev))
3576 mlx4_multi_func_cleanup(dev);
3579 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3582 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
3583 pci_disable_sriov(pdev);
3584 dev->flags &= ~MLX4_FLAG_SRIOV;
3587 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3588 atomic_dec(&pf_loading);
3590 kfree(priv->dev.dev_vfs);
3592 if (!mlx4_is_slave(dev))
3593 mlx4_free_ownership(dev);
3599 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3600 struct mlx4_priv *priv)
3603 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3604 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3605 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
3606 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
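/* param_map maps positional num_vfs/probe_vf arguments onto the nvfs[]
 * slots {port1, port2, both ports}: a single argument goes to the
 * both-ports slot, two arguments to port1 and port2, and three
 * arguments to all slots. */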
3607 unsigned total_vfs = 0;
3610 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3612 err = mlx4_pci_enable_device(&priv->dev);
3614 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3618 /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
3619 * per port, we must limit the number of VFs to 63 (since there are
3622 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
3623 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
3624 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
3626 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
3628 goto err_disable_pdev;
3631 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
3633 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
3634 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
3635 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
3637 goto err_disable_pdev;
3640 if (total_vfs > MLX4_MAX_NUM_VF) {
3642 "Requested more VF's (%d) than allowed by hw (%d)\n",
3643 total_vfs, MLX4_MAX_NUM_VF);
3645 goto err_disable_pdev;
3648 for (i = 0; i < MLX4_MAX_PORTS; i++) {
3649 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
3651 "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
3652 nvfs[i] + nvfs[2], i + 1,
3653 MLX4_MAX_NUM_VF_P_PORT);
3655 goto err_disable_pdev;
3659 /* Check for BARs. */
3660 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
3661 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3662 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
3663 pci_dev_data, (long)pci_resource_flags(pdev, 0));
3665 goto err_disable_pdev;
3667 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
3668 dev_err(&pdev->dev, "Missing UAR, aborting\n");
3670 goto err_disable_pdev;
3673 err = pci_request_regions(pdev, DRV_NAME);
3675 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
3676 goto err_disable_pdev;
3679 pci_set_master(pdev);
3681 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3683 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
3684 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3686 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
3687 goto err_release_regions;
3690 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3692 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
3693 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3695 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
3696 goto err_release_regions;
3700 /* Allow large DMA segments, up to the firmware limit of 1 GB */
3701 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
3702 /* Detect if this device is a virtual function */
3703 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3704 /* When acting as PF, we normally skip VFs unless explicitly
3705 * requested to probe them.
3708 unsigned vfs_offset = 0;
3710 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
3711 vfs_offset + nvfs[i] < extended_func_num(pdev);
3712 vfs_offset += nvfs[i], i++)
3714 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
3716 goto err_release_regions;
3718 if ((extended_func_num(pdev) - vfs_offset)
3720 dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
3721 extended_func_num(pdev));
3723 goto err_release_regions;
3728 err = mlx4_catas_init(&priv->dev);
3730 goto err_release_regions;
3732 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
3739 mlx4_catas_end(&priv->dev);
3741 err_release_regions:
3742 pci_release_regions(pdev);
3745 mlx4_pci_disable_device(&priv->dev);
3746 pci_set_drvdata(pdev, NULL);
3750 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3753 struct sysctl_ctx_list *ctx;
3754 struct sysctl_oid *node;
3755 struct sysctl_oid_list *node_list;
3756 struct mlx4_priv *priv;
3757 struct mlx4_dev *dev;
3760 printk_once(KERN_INFO "%s", mlx4_version);
3762 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
3767 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
3768 if (!dev->persist) {
3772 dev->persist->pdev = pdev;
3773 dev->persist->dev = dev;
3774 pci_set_drvdata(pdev, dev->persist);
3775 priv->pci_dev_data = id->driver_data;
3776 mutex_init(&dev->persist->device_state_mutex);
3777 mutex_init(&dev->persist->interface_state_mutex);
3778 mutex_init(&dev->persist->pci_status_mutex);
3780 ret = __mlx4_init_one(pdev, id->driver_data, priv);
3782 kfree(dev->persist);
3786 device_set_desc(pdev->dev.bsddev, mlx4_description);
3787 pci_save_state(pdev->dev.bsddev);
3790 snprintf(dev->fw_str, sizeof(dev->fw_str), "%d.%d.%d",
3791 (int) (dev->caps.fw_ver >> 32),
3792 (int) (dev->caps.fw_ver >> 16) & 0xffff,
3793 (int) (dev->caps.fw_ver & 0xffff));
3796 sysctl_ctx_init(ctx);
3797 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(pdev->dev.kobj.oidp),
3798 OID_AUTO, "hw", CTLFLAG_RD, 0, "mlx4 dev hw information");
3800 node_list = SYSCTL_CHILDREN(node);
3801 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO,
3802 "fw_version", CTLFLAG_RD, dev->fw_str, 0,
3803 "Device firmware version");
3804 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO,
3805 "board_id", CTLFLAG_RD, dev->board_id, 0,
3806 "Device board identifier");
3812 static void mlx4_clean_dev(struct mlx4_dev *dev)
3814 struct mlx4_dev_persistent *persist = dev->persist;
3815 struct mlx4_priv *priv = mlx4_priv(dev);
3816 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
3818 memset(priv, 0, sizeof(*priv));
3819 priv->dev.persist = persist;
3820 priv->dev.flags = flags;
3823 static void mlx4_unload_one(struct pci_dev *pdev)
3825 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3826 struct mlx4_dev *dev = persist->dev;
3827 struct mlx4_priv *priv = mlx4_priv(dev);
3834 /* Save the current port types for later use */
3835 for (i = 0; i < dev->caps.num_ports; i++) {
3836 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
3837 dev->persist->curr_port_poss_type[i] = dev->caps.
3838 possible_type[i + 1];
3841 pci_dev_data = priv->pci_dev_data;
3843 mlx4_stop_sense(dev);
3844 mlx4_unregister_device(dev);
3846 for (p = 1; p <= dev->caps.num_ports; p++) {
3847 mlx4_cleanup_port_info(&priv->port[p]);
3848 mlx4_CLOSE_PORT(dev, p);
3851 if (mlx4_is_master(dev))
3852 mlx4_free_resource_tracker(dev,
3853 RES_TR_FREE_SLAVES_ONLY);
3855 mlx4_cleanup_default_counters(dev);
3856 if (!mlx4_is_slave(dev))
3857 mlx4_cleanup_counters_table(dev);
3858 mlx4_cleanup_qp_table(dev);
3859 mlx4_cleanup_srq_table(dev);
3860 mlx4_cleanup_cq_table(dev);
3861 mlx4_cmd_use_polling(dev);
3862 mlx4_cleanup_eq_table(dev);
3863 mlx4_cleanup_mcg_table(dev);
3864 mlx4_cleanup_mr_table(dev);
3865 mlx4_cleanup_xrcd_table(dev);
3866 mlx4_cleanup_pd_table(dev);
3868 if (mlx4_is_master(dev))
3869 mlx4_free_resource_tracker(dev,
3870 RES_TR_FREE_STRUCTS_ONLY);
3873 mlx4_uar_free(dev, &priv->driver_uar);
3874 mlx4_cleanup_uar_table(dev);
3875 if (!mlx4_is_slave(dev))
3876 mlx4_clear_steering(dev);
3877 mlx4_free_eq_table(dev);
3878 if (mlx4_is_master(dev))
3879 mlx4_multi_func_cleanup(dev);
3880 mlx4_close_hca(dev);
3882 if (mlx4_is_slave(dev))
3883 mlx4_multi_func_cleanup(dev);
3884 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3886 if (dev->flags & MLX4_FLAG_MSI_X)
3887 pci_disable_msix(pdev);
3889 if (!mlx4_is_slave(dev))
3890 mlx4_free_ownership(dev);
3892 kfree(dev->caps.qp0_qkey);
3893 kfree(dev->caps.qp0_tunnel);
3894 kfree(dev->caps.qp0_proxy);
3895 kfree(dev->caps.qp1_tunnel);
3896 kfree(dev->caps.qp1_proxy);
3897 kfree(dev->dev_vfs);
3899 mlx4_clean_dev(dev);
3900 priv->pci_dev_data = pci_dev_data;
3904 static void mlx4_remove_one(struct pci_dev *pdev)
3906 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3907 struct mlx4_dev *dev = persist->dev;
3908 struct mlx4_priv *priv = mlx4_priv(dev);
3911 mutex_lock(&persist->interface_state_mutex);
3912 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
3913 mutex_unlock(&persist->interface_state_mutex);
3916 * Clear the device description to avoid use-after-free,
3917 * because the bsddev is not destroyed when this module is unloaded. */
3920 device_set_desc(pdev->dev.bsddev, NULL);
3922 /* Disabling SR-IOV is not allowed while there are active VFs */
3923 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
3924 active_vfs = mlx4_how_many_lives_vf(dev);
3926 pr_warn("Removing PF while there are active VFs!\n");
3927 pr_warn("Will not disable SR-IOV.\n");
3931 /* The device is marked for deletion; proceed now without the lock,
3932 * letting other tasks terminate. */
3934 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
3935 mlx4_unload_one(pdev);
3937 mlx4_info(dev, "%s: interface is down\n", __func__);
3938 mlx4_catas_end(dev);
3939 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
3940 mlx4_warn(dev, "Disabling SR-IOV\n");
3941 pci_disable_sriov(pdev);
3944 pci_release_regions(pdev);
3945 pci_disable_device(pdev);
3946 kfree(dev->persist);
3948 pci_set_drvdata(pdev, NULL);
3951 static int restore_current_port_types(struct mlx4_dev *dev,
3952 enum mlx4_port_type *types,
3953 enum mlx4_port_type *poss_types)
3955 struct mlx4_priv *priv = mlx4_priv(dev);
3958 mlx4_stop_sense(dev);
3960 mutex_lock(&priv->port_mutex);
3961 for (i = 0; i < dev->caps.num_ports; i++)
3962 dev->caps.possible_type[i + 1] = poss_types[i];
3963 err = mlx4_change_port_types(dev, types);
3964 mutex_unlock(&priv->port_mutex);
3966 mlx4_start_sense(dev);
3971 int mlx4_restart_one(struct pci_dev *pdev)
3973 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
3974 struct mlx4_dev *dev = persist->dev;
3975 struct mlx4_priv *priv = mlx4_priv(dev);
3976 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3977 int pci_dev_data, err, total_vfs;
3979 pci_dev_data = priv->pci_dev_data;
3980 total_vfs = dev->persist->num_vfs;
3981 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
3983 mlx4_unload_one(pdev);
3984 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
3986 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
3987 __func__, pci_name(pdev), err);
3991 err = restore_current_port_types(dev, dev->persist->curr_port_type,
3992 dev->persist->curr_port_poss_type);
3994 mlx4_err(dev, "could not restore original port types (%d)\n",
4000 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
4001 /* MT25408 "Hermon" SDR */
4002 { PCI_VDEVICE(MELLANOX, 0x6340),
4003 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4004 /* MT25408 "Hermon" DDR */
4005 { PCI_VDEVICE(MELLANOX, 0x634a),
4006 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4007 /* MT25408 "Hermon" QDR */
4008 { PCI_VDEVICE(MELLANOX, 0x6354),
4009 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4010 /* MT25408 "Hermon" DDR PCIe gen2 */
4011 { PCI_VDEVICE(MELLANOX, 0x6732),
4012 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4013 /* MT25408 "Hermon" QDR PCIe gen2 */
4014 { PCI_VDEVICE(MELLANOX, 0x673c),
4015 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4016 /* MT25408 "Hermon" EN 10GigE */
4017 { PCI_VDEVICE(MELLANOX, 0x6368),
4018 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4019 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
4020 { PCI_VDEVICE(MELLANOX, 0x6750),
4021 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4022 /* MT25458 ConnectX EN 10GBASE-T 10GigE */
4023 { PCI_VDEVICE(MELLANOX, 0x6372),
4024 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4025 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
4026 { PCI_VDEVICE(MELLANOX, 0x675a),
4027 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4028 /* MT26468 ConnectX EN 10GigE PCIe gen2*/
4029 { PCI_VDEVICE(MELLANOX, 0x6764),
4030 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4031 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
4032 { PCI_VDEVICE(MELLANOX, 0x6746),
4033 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4034 /* MT26478 ConnectX2 40GigE PCIe gen2 */
4035 { PCI_VDEVICE(MELLANOX, 0x676e),
4036 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
4037 /* MT25400 Family [ConnectX-2 Virtual Function] */
4038 { PCI_VDEVICE(MELLANOX, 0x1002),
4039 .driver_data = MLX4_PCI_DEV_IS_VF },
4040 /* MT27500 Family [ConnectX-3] */
4041 { PCI_VDEVICE(MELLANOX, 0x1003) },
4042 /* MT27500 Family [ConnectX-3 Virtual Function] */
4043 { PCI_VDEVICE(MELLANOX, 0x1004),
4044 .driver_data = MLX4_PCI_DEV_IS_VF },
4045 { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
4046 { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
4047 { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
4048 { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
4049 { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
4050 { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
4051 { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
4052 { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
4053 { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
4054 { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
4055 { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
4056 { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
4060 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
4062 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
4063 pci_channel_state_t state)
4065 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4067 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
4068 mlx4_enter_error_state(persist);
4070 mutex_lock(&persist->interface_state_mutex);
4071 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4072 mlx4_unload_one(pdev);
4074 mutex_unlock(&persist->interface_state_mutex);
4075 if (state == pci_channel_io_perm_failure)
4076 return PCI_ERS_RESULT_DISCONNECT;
4078 mlx4_pci_disable_device(persist->dev);
4079 return PCI_ERS_RESULT_NEED_RESET;
4082 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
4084 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4085 struct mlx4_dev *dev = persist->dev;
4088 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
4089 err = mlx4_pci_enable_device(dev);
4091 mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
4092 return PCI_ERS_RESULT_DISCONNECT;
4095 pci_set_master(pdev);
4096 return PCI_ERS_RESULT_RECOVERED;
4099 static void mlx4_pci_resume(struct pci_dev *pdev)
4101 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4102 struct mlx4_dev *dev = persist->dev;
4103 struct mlx4_priv *priv = mlx4_priv(dev);
4104 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4108 mlx4_err(dev, "%s was called\n", __func__);
4109 total_vfs = dev->persist->num_vfs;
4110 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4112 mutex_lock(&persist->interface_state_mutex);
4113 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4114 err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
4117 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
4122 err = restore_current_port_types(dev, dev->persist->
4123 curr_port_type, dev->persist->
4124 curr_port_poss_type);
4126 mlx4_err(dev, "could not restore original port types (%d)\n", err);
4129 mutex_unlock(&persist->interface_state_mutex);
4133 static void mlx4_shutdown(struct pci_dev *pdev)
4135 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4137 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
4138 mutex_lock(&persist->interface_state_mutex);
4139 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4140 mlx4_unload_one(pdev);
4141 mutex_unlock(&persist->interface_state_mutex);
4144 static const struct pci_error_handlers mlx4_err_handler = {
4145 .error_detected = mlx4_pci_err_detected,
4146 .slot_reset = mlx4_pci_slot_reset,
4147 .resume = mlx4_pci_resume,
4150 static struct pci_driver mlx4_driver = {
4152 .id_table = mlx4_pci_table,
4153 .probe = mlx4_init_one,
4154 .shutdown = mlx4_shutdown,
4155 .remove = mlx4_remove_one,
4156 .err_handler = &mlx4_err_handler,
4159 static int __init mlx4_verify_params(void)
4161 if ((log_num_mac < 0) || (log_num_mac > 7)) {
4162 pr_warn("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
4166 if (log_num_vlan != 0)
4167 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
4168 MLX4_LOG_NUM_VLANS);
4171 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
4173 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
4174 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
4179 /* Check that the module parameter for port types is a legal combination */
4180 if (port_type_array[0] == false && port_type_array[1] == true) {
4181 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
4182 port_type_array[0] = true;
4185 if (mlx4_log_num_mgm_entry_size < -7 ||
4186 (mlx4_log_num_mgm_entry_size > 0 &&
4187 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
4188 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
4189 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
4190 mlx4_log_num_mgm_entry_size,
4191 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
4192 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
4199 static int __init mlx4_init(void)
4203 if (mlx4_verify_params())
4207 mlx4_wq = create_singlethread_workqueue("mlx4");
4211 ret = pci_register_driver(&mlx4_driver);
4213 destroy_workqueue(mlx4_wq);
4214 return ret < 0 ? ret : 0;
4217 static void __exit mlx4_cleanup(void)
4219 pci_unregister_driver(&mlx4_driver);
4220 destroy_workqueue(mlx4_wq);
4223 module_init(mlx4_init);
4224 module_exit(mlx4_cleanup);
4227 mlx4_evhand(module_t mod, int event, void *arg)
4232 static moduledata_t mlx4_mod = {
4234 .evhand = mlx4_evhand,
4236 MODULE_VERSION(mlx4, 1);
4237 DECLARE_MODULE(mlx4, mlx4_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY);
4238 MODULE_DEPEND(mlx4, linuxkpi, 1, 1, 1);