/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int enable_sys_tune = 0;
module_param(enable_sys_tune, int, 0444);
MODULE_PARM_DESC(enable_sys_tune, "Tune the CPUs for better performance (default 0)");

int mlx4_blck_lb = 1;
module_param_named(block_loopback, mlx4_blck_lb, int, 0644);
MODULE_PARM_DESC(block_loopback, "Block multicast loopback packets if > 0 "
                                 "(default: 1)");

static int num_vfs;
module_param(num_vfs, int, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");

static int probe_vf;
module_param(probe_vf, int, 0644);
MODULE_PARM_DESC(probe_vf, "number of VFs to probe by the PF driver (when num_vfs > 0)");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;

module_param_named(log_num_mgm_entry_size,
                        mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the"
                                         " number of QPs per MCG; for"
                                         " example, 10 gives 248. Range:"
                                         " 7 <= log_num_mgm_entry_size <= 12."
                                         " To activate device managed"
                                         " flow steering when available, set to -1");
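
/*
 * Worked example of the relationship above, using the formula found in
 * slave_adjust_steering_mode() below: a log entry size of 10 gives a
 * 1024-byte MGM entry, and 4 * (1024 / 16 - 2) = 248 QPs per group.
 */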

static int high_rate_steer;
module_param(high_rate_steer, int, 0444);
MODULE_PARM_DESC(high_rate_steer, "Enable steering mode for higher packet rate"
                                  " (default off)");

static int fast_drop;
module_param_named(fast_drop, fast_drop, int, 0444);
MODULE_PARM_DESC(fast_drop,
                 "Enable fast packet drop when no receive WQEs are posted");

int mlx4_enable_64b_cqe_eqe;
module_param_named(enable_64b_cqe_eqe, mlx4_enable_64b_cqe_eqe, int, 0644);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
                 "Enable 64 byte CQEs/EQEs when the FW supports this, if nonzero");

#define HCA_GLOBAL_CAP_MASK            0

#define PF_CONTEXT_BEHAVIOUR_MASK       MLX4_FUNC_CAP_64B_EQE_CQE

static char mlx4_version[] __devinitdata =
        DRV_NAME ": Mellanox ConnectX core driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan,
        "(Obsolete) Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7

int log_mtts_per_seg = ilog2(1);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment "
                 "(0-7) (default: 0)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
#if 0
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
                                "1 for IB, 2 for Ethernet");
#endif

struct mlx4_port_config {
        struct list_head list;
        enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
        struct pci_dev *pdev;
};

#define MLX4_LOG_NUM_MTT 20
/* We limit this to 30 because of a bitmap limitation: the bitmap code
 * works with an int rather than an unsigned int (see mlx4_buddy_init ->
 * bitmap_zero, which takes an int).
 */
#define MLX4_MAX_LOG_NUM_MTT 30
static struct mlx4_profile mod_param_profile = {
        .num_qp         = 19,
        .num_srq        = 16,
        .rdmarc_per_qp  = 4,
        .num_cq         = 16,
        .num_mcg        = 13,
        .num_mpt        = 19,
        .num_mtt        = 0, /* max(20, 2*MTTs for host memory) */
};

module_param_named(log_num_qp, mod_param_profile.num_qp, int, 0444);
MODULE_PARM_DESC(log_num_qp, "log maximum number of QPs per HCA (default: 19)");

module_param_named(log_num_srq, mod_param_profile.num_srq, int, 0444);
MODULE_PARM_DESC(log_num_srq, "log maximum number of SRQs per HCA "
                 "(default: 16)");

module_param_named(log_rdmarc_per_qp, mod_param_profile.rdmarc_per_qp, int,
                   0444);
MODULE_PARM_DESC(log_rdmarc_per_qp, "log number of RDMARC buffers per QP "
                 "(default: 4)");

module_param_named(log_num_cq, mod_param_profile.num_cq, int, 0444);
MODULE_PARM_DESC(log_num_cq, "log maximum number of CQs per HCA (default: 16)");

module_param_named(log_num_mcg, mod_param_profile.num_mcg, int, 0444);
MODULE_PARM_DESC(log_num_mcg, "log maximum number of multicast groups per HCA "
                 "(default: 13)");

module_param_named(log_num_mpt, mod_param_profile.num_mpt, int, 0444);
MODULE_PARM_DESC(log_num_mpt,
                 "log maximum number of memory protection table entries per "
                 "HCA (default: 19)");

module_param_named(log_num_mtt, mod_param_profile.num_mtt, int, 0444);
MODULE_PARM_DESC(log_num_mtt,
                 "log maximum number of memory translation table segments per "
                 "HCA (default: max(20, 2*MTTs needed to register all of the host memory, limited to 30))");

enum {
        MLX4_IF_STATE_BASIC,
        MLX4_IF_STATE_EXTENDED
};
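
/*
 * The mod_param_profile fields above are log2 values; expand them here
 * into the absolute object counts that the rest of the driver consumes.
 */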
static void process_mod_param_profile(struct mlx4_profile *profile)
{
        vm_size_t hwphyssz;

        hwphyssz = 0;
        TUNABLE_ULONG_FETCH("hw.realmem", (u_long *) &hwphyssz);

        profile->num_qp        = 1 << mod_param_profile.num_qp;
        profile->num_srq       = 1 << mod_param_profile.num_srq;
        profile->rdmarc_per_qp = 1 << mod_param_profile.rdmarc_per_qp;
        profile->num_cq        = 1 << mod_param_profile.num_cq;
        profile->num_mcg       = 1 << mod_param_profile.num_mcg;
        profile->num_mpt       = 1 << mod_param_profile.num_mpt;
        /*
         * We want to scale the number of MTTs with the size of the
         * system memory, since it makes sense to register a lot of
         * memory on a system with a lot of memory.  As a heuristic,
         * make sure we have enough MTTs to register twice the system
         * memory (with PAGE_SIZE entries).
         *
         * This number has to be a power of two and fit into 32 bits
         * due to device limitations. We cap it at 2^30 because of a
         * bitmap limitation (mlx4_buddy_init -> bitmap_zero works with
         * an int rather than an unsigned int). That limits us to 4TB of
         * memory registration per HCA with 4KB pages, which is probably
         * OK for the next few months.
         */
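        /*
         * Net effect when log_num_mtt is not set: the value computed
         * below is clamped between 2^(MLX4_LOG_NUM_MTT - log_mtts_per_seg)
         * and 2^(MLX4_MAX_LOG_NUM_MTT - log_mtts_per_seg), then rounded
         * up to a power of two.
         */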
        if (mod_param_profile.num_mtt)
                profile->num_mtt = 1 << mod_param_profile.num_mtt;
        else {
                profile->num_mtt =
                        roundup_pow_of_two(max_t(unsigned,
                                                1 << (MLX4_LOG_NUM_MTT - log_mtts_per_seg),
                                                min(1UL <<
                                                (MLX4_MAX_LOG_NUM_MTT -
                                                log_mtts_per_seg),
                                                (hwphyssz << 1)
                                                >> log_mtts_per_seg)));
                /* Set the actual value, so that it is reflected back to
                   the user via sysfs. */
                mod_param_profile.num_mtt = ilog2(profile->num_mtt * (1 << log_mtts_per_seg));
        }
}

int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
{
        int i;

        for (i = 0; i < dev->caps.num_ports - 1; i++) {
                if (port_type[i] != port_type[i + 1]) {
                        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
                                mlx4_err(dev, "Only same port types supported "
                                         "on this HCA, aborting.\n");
                                return -EINVAL;
                        }
                }
        }

        for (i = 0; i < dev->caps.num_ports; i++) {
                if (!(port_type[i] & dev->caps.supported_type[i+1])) {
                        mlx4_err(dev, "Requested port type for port %d is not "
                                      "supported on this HCA\n", i + 1);
                        return -EINVAL;
                }
        }
        return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
        int i;

        for (i = 1; i <= dev->caps.num_ports; ++i)
                dev->caps.port_mask[i] = dev->caps.port_type[i];
}

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
        int err;
        int i;

        err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
        if (err) {
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
                return err;
        }

        if (dev_cap->min_page_sz > PAGE_SIZE) {
                mlx4_err(dev, "HCA minimum page size of %d bigger than "
                         "kernel PAGE_SIZE of %d, aborting.\n",
                         dev_cap->min_page_sz, PAGE_SIZE);
                return -ENODEV;
        }
        if (dev_cap->num_ports > MLX4_MAX_PORTS) {
                mlx4_err(dev, "HCA has %d ports, but we only support %d, "
                         "aborting.\n",
                         dev_cap->num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }

        if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
                mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
                         "PCI resource 2 size of 0x%llx, aborting.\n",
                         dev_cap->uar_size,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
                return -ENODEV;
        }

        dev->caps.num_ports          = dev_cap->num_ports;
        dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
        for (i = 1; i <= dev->caps.num_ports; ++i) {
                dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
                dev->caps.ib_mtu_cap[i]     = dev_cap->ib_mtu[i];
                dev->phys_caps.gid_phys_table_len[i]  = dev_cap->max_gids[i];
                dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
                /* set gid and pkey table operating lengths by default
                 * to non-sriov values */
                dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
                dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
                dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
                dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
                dev->caps.def_mac[i]        = dev_cap->def_mac[i];
                dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
                dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
                dev->caps.default_sense[i] = dev_cap->default_sense[i];
                dev->caps.trans_type[i]     = dev_cap->trans_type[i];
                dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
                dev->caps.wavelength[i]     = dev_cap->wavelength[i];
                dev->caps.trans_code[i]     = dev_cap->trans_code[i];
        }

        dev->caps.uar_page_size      = PAGE_SIZE;
        dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
        dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
        dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
        dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
        dev->caps.max_sq_sg          = dev_cap->max_sq_sg;
        dev->caps.max_rq_sg          = dev_cap->max_rq_sg;
        dev->caps.max_wqes           = dev_cap->max_qp_sz;
        dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
        dev->caps.max_srq_wqes       = dev_cap->max_srq_sz;
        dev->caps.max_srq_sge        = dev_cap->max_rq_sg - 1;
        dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
        dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
        dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
        /*
         * Subtract 1 from the limit because we need to allocate a
         * spare CQE to enable resizing the CQ
         */
        dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
        dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
        dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
        dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
        dev->caps.reserved_mrws      = dev_cap->reserved_mrws;

        /* The first 128 UARs are used for EQ doorbells */
        dev->caps.reserved_uars      = max_t(int, 128, dev_cap->reserved_uars);
        dev->caps.reserved_pds       = dev_cap->reserved_pds;
        dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->reserved_xrcds : 0;
        dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->max_xrcds : 0;
        dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;

        dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
        dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
        dev->caps.flags              = dev_cap->flags;
        dev->caps.flags2             = dev_cap->flags2;
        dev->caps.bmme_flags         = dev_cap->bmme_flags;
        dev->caps.reserved_lkey      = dev_cap->reserved_lkey;
        dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
        dev->caps.cq_timestamp       = dev_cap->timestamp_support;
        dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
        dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

        /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
        if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
                dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
        /* Don't do sense port on multifunction devices (for now at least) */
        if (mlx4_is_mfunc(dev))
                dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

        dev->caps.log_num_macs  = log_num_mac;
        dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;

        dev->caps.fast_drop     = fast_drop ?
                                  !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FAST_DROP) :
                                  0;

        for (i = 1; i <= dev->caps.num_ports; ++i) {
                dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
                if (dev->caps.supported_type[i]) {
                        /* if only ETH is supported - assign ETH */
                        if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
                                dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
                        /* if only IB is supported, assign IB */
                        else if (dev->caps.supported_type[i] ==
                                 MLX4_PORT_TYPE_IB)
                                dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
                        else {
                                /* if IB and ETH are supported, we set the port
                                 * type according to user selection of port type;
                                 * if user selected none, take the FW hint */
                                if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
                                        dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
                                                MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
                                else
                                        dev->caps.port_type[i] = port_type_array[i - 1];
                        }
                }
                /*
                 * Link sensing is allowed on the port if 3 conditions are true:
                 * 1. Both protocols are supported on the port.
                 * 2. Different types are supported on the port
                 * 3. FW declared that it supports link sensing
                 */
                mlx4_priv(dev)->sense.sense_allowed[i] =
                        ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
                         (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
                         (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

                /*
                 * If "default_sense" bit is set, we move the port to "AUTO" mode
                 * and perform sense_port FW command to try and set the correct
                 * port type from beginning
                 */
                if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
                        enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
                        dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
                        mlx4_SENSE_PORT(dev, i, &sensed_port);
                        if (sensed_port != MLX4_PORT_TYPE_NONE)
                                dev->caps.port_type[i] = sensed_port;
                } else {
                        dev->caps.possible_type[i] = dev->caps.port_type[i];
                }

                if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
                        dev->caps.log_num_macs = dev_cap->log_max_macs[i];
                        mlx4_warn(dev, "Requested number of MACs is too high "
                                  "for port %d, reducing to %d.\n",
                                  i, 1 << dev->caps.log_num_macs);
                }
                if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
                        dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
                        mlx4_warn(dev, "Requested number of VLANs is too high "
                                  "for port %d, reducing to %d.\n",
                                  i, 1 << dev->caps.log_num_vlans);
                }
        }

        dev->caps.max_basic_counters = dev_cap->max_basic_counters;
        dev->caps.max_extended_counters = dev_cap->max_extended_counters;
        /* support extended counters if available */
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS_EXT)
                dev->caps.max_counters = dev->caps.max_extended_counters;
        else
                dev->caps.max_counters = dev->caps.max_basic_counters;

        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
                (1 << dev->caps.log_num_macs) *
                (1 << dev->caps.log_num_vlans) *
                dev->caps.num_ports;
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

        dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

        dev->caps.sync_qp = dev_cap->sync_qp;
        dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

        if (!mlx4_enable_64b_cqe_eqe) {
                if (dev_cap->flags &
                    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
                        mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
                        dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
                        dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
                }
        }

        if ((dev->caps.flags &
            (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
            mlx4_is_master(dev))
                dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

        return 0;
}
/* Check whether any VFs are still alive, and return how many. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_state;
        int i;
        int ret = 0;

        for (i = 1 /* slave 0 is the PPF */; i < dev->num_slaves; ++i) {
                s_state = &priv->mfunc.master.slave_state[i];
                if (s_state->active && s_state->last_cmd !=
                    MLX4_COMM_CMD_RESET) {
                        mlx4_warn(dev, "%s: slave: %d is still active\n",
                                  __func__, i);
                        ret++;
                }
        }
        return ret;
}

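/*
 * Map a proxy or tunnel special QPN to its reserved paravirtualized
 * qkey. The qkey is MLX4_RESERVED_QKEY_BASE plus the QPN's offset
 * within its own (proxy or tunnel) range.
 */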
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
        u32 qk = MLX4_RESERVED_QKEY_BASE;

        if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
            qpn < dev->phys_caps.base_proxy_sqpn)
                return -EINVAL;

        if (qpn >= dev->phys_caps.base_tunnel_sqpn)
                /* tunnel qp */
                qk += qpn - dev->phys_caps.base_tunnel_sqpn;
        else
                qk += qpn - dev->phys_caps.base_proxy_sqpn;
        *qkey = qk;
        return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
        struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

        if (!mlx4_is_master(dev))
                return;

        priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
        struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

        if (!mlx4_is_master(dev))
                return;

        priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

        if (!mlx4_is_master(dev))
                return 0;

        return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_slave;

        if (!mlx4_is_master(dev))
                return 0;

        s_slave = &priv->mfunc.master.slave_state[slave];
        return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
                                       struct mlx4_dev_cap *dev_cap,
                                       struct mlx4_init_hca_param *hca_param)
{
        dev->caps.steering_mode = hca_param->steering_mode;
        if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED)
                dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
        else
                dev->caps.num_qp_per_mgm =
                        4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

        mlx4_dbg(dev, "Steering mode is: %s\n",
                 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
        int                        err;
        u32                        page_size;
        struct mlx4_dev_cap        dev_cap;
        struct mlx4_func_cap       func_cap;
        struct mlx4_init_hca_param hca_param;
        int                        i;

        memset(&hca_param, 0, sizeof(hca_param));
        err = mlx4_QUERY_HCA(dev, &hca_param);
        if (err) {
                mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
                return err;
        }

        /* Fail if the HCA has an unknown capability. */
        if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
            HCA_GLOBAL_CAP_MASK) {
                mlx4_err(dev, "Unknown hca global capabilities\n");
                return -ENOSYS;
        }

        mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

        dev->caps.hca_core_clock = hca_param.hca_core_clock;

        memset(&dev_cap, 0, sizeof(dev_cap));
        dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
        err = mlx4_dev_cap(dev, &dev_cap);
        if (err) {
                mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
                return err;
        }

        err = mlx4_QUERY_FW(dev);
        if (err)
                mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");

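        /*
         * page_size_cap is a mask of the supported page sizes; the
         * two's-complement expression below recovers the smallest
         * supported page size from it.
         */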
        page_size = ~dev->caps.page_size_cap + 1;
        mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
        if (page_size > PAGE_SIZE) {
                mlx4_err(dev, "HCA minimum page size of %d bigger than "
                         "kernel PAGE_SIZE of %d, aborting.\n",
                         page_size, PAGE_SIZE);
                return -ENODEV;
        }

        /* slave gets uar page size from QUERY_HCA fw command */
        dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

        /* TODO: relax this assumption */
        if (dev->caps.uar_page_size != PAGE_SIZE) {
                mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %d\n",
                         dev->caps.uar_page_size, PAGE_SIZE);
                return -ENODEV;
        }

        memset(&func_cap, 0, sizeof(func_cap));
        err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
        if (err) {
                mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
                          err);
                return err;
        }

        if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
            PF_CONTEXT_BEHAVIOUR_MASK) {
                mlx4_err(dev, "Unknown pf context behaviour\n");
                return -ENOSYS;
        }

        dev->caps.num_ports             = func_cap.num_ports;
        dev->quotas.qp                  = func_cap.qp_quota;
        dev->quotas.srq                 = func_cap.srq_quota;
        dev->quotas.cq                  = func_cap.cq_quota;
        dev->quotas.mpt                 = func_cap.mpt_quota;
        dev->quotas.mtt                 = func_cap.mtt_quota;
        dev->caps.num_qps               = 1 << hca_param.log_num_qps;
        dev->caps.num_srqs              = 1 << hca_param.log_num_srqs;
        dev->caps.num_cqs               = 1 << hca_param.log_num_cqs;
        dev->caps.num_mpts              = 1 << hca_param.log_mpt_sz;
        dev->caps.num_eqs               = func_cap.max_eq;
        dev->caps.reserved_eqs          = func_cap.reserved_eq;
        dev->caps.num_pds               = MLX4_NUM_PDS;
        dev->caps.num_mgms              = 0;
        dev->caps.num_amgms             = 0;

        if (dev->caps.num_ports > MLX4_MAX_PORTS) {
                mlx4_err(dev, "HCA has %d ports, but we only support %d, "
                         "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }

        dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
        dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
        dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
        dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);

        if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
            !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
                err = -ENOMEM;
                goto err_mem;
        }

        for (i = 1; i <= dev->caps.num_ports; ++i) {
                err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
                if (err) {
                        mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
                                 " port %d, aborting (%d).\n", i, err);
                        goto err_mem;
                }
                dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
                dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
                dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
                dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
                dev->caps.port_mask[i] = dev->caps.port_type[i];
                err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
                                                      &dev->caps.gid_table_len[i],
                                                      &dev->caps.pkey_table_len[i]);
                if (err)
                        goto err_mem;
        }

        if (dev->caps.uar_page_size * (dev->caps.num_uars -
                                       dev->caps.reserved_uars) >
                                       pci_resource_len(dev->pdev, 2)) {
                mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
                         "PCI resource 2 size of 0x%llx, aborting.\n",
                         dev->caps.uar_page_size * dev->caps.num_uars,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
                err = -ENOMEM;
                goto err_mem;
        }

        if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
                dev->caps.eqe_size   = 64;
                dev->caps.eqe_factor = 1;
        } else {
                dev->caps.eqe_size   = 32;
                dev->caps.eqe_factor = 0;
        }

        if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
                dev->caps.cqe_size   = 64;
                dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
        } else {
                dev->caps.cqe_size   = 32;
        }

        slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

        return 0;

err_mem:
        kfree(dev->caps.qp0_tunnel);
        kfree(dev->caps.qp0_proxy);
        kfree(dev->caps.qp1_tunnel);
        kfree(dev->caps.qp1_proxy);
        dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
                dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;

        return err;
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_types)
{
        int err = 0;
        int change = 0;
        int port;

        for (port = 0; port < dev->caps.num_ports; port++) {
                /* Change the port type only if the new type is different
                 * from the current, and not set to Auto */
                if (port_types[port] != dev->caps.port_type[port + 1])
                        change = 1;
        }
        if (change) {
                mlx4_unregister_device(dev);
                for (port = 1; port <= dev->caps.num_ports; port++) {
                        mlx4_CLOSE_PORT(dev, port);
                        dev->caps.port_type[port] = port_types[port - 1];
                        err = mlx4_SET_PORT(dev, port, -1);
                        if (err) {
                                mlx4_err(dev, "Failed to set port %d, "
                                              "aborting\n", port);
                                goto out;
                        }
                }
                mlx4_set_port_mask(dev);
                err = mlx4_register_device(dev);
        }

out:
        return err;
}

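/* sysfs handler: report the port type, e.g. "ib", "eth", or "auto (eth)". */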
static ssize_t show_port_type(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_attr);
        struct mlx4_dev *mdev = info->dev;
        char type[8];

        sprintf(type, "%s",
                (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
                "ib" : "eth");
        if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
                sprintf(buf, "auto (%s)\n", type);
        else
                sprintf(buf, "%s\n", type);

        return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_attr);
        struct mlx4_dev *mdev = info->dev;
        struct mlx4_priv *priv = mlx4_priv(mdev);
        enum mlx4_port_type types[MLX4_MAX_PORTS];
        enum mlx4_port_type new_types[MLX4_MAX_PORTS];
        int i;
        int err = 0;

        if (!strcmp(buf, "ib\n"))
                info->tmp_type = MLX4_PORT_TYPE_IB;
        else if (!strcmp(buf, "eth\n"))
                info->tmp_type = MLX4_PORT_TYPE_ETH;
        else if (!strcmp(buf, "auto\n"))
                info->tmp_type = MLX4_PORT_TYPE_AUTO;
        else {
                mlx4_err(mdev, "%s is not a supported port type\n", buf);
                return -EINVAL;
        }

        mlx4_stop_sense(mdev);
        mutex_lock(&priv->port_mutex);
        /* Possible type is always the one that was delivered */
        mdev->caps.possible_type[info->port] = info->tmp_type;

        for (i = 0; i < mdev->caps.num_ports; i++) {
                types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
                                        mdev->caps.possible_type[i+1];
                if (types[i] == MLX4_PORT_TYPE_AUTO)
                        types[i] = mdev->caps.port_type[i+1];
        }

        if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
            !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
                for (i = 1; i <= mdev->caps.num_ports; i++) {
                        if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
                                mdev->caps.possible_type[i] = mdev->caps.port_type[i];
                                err = -EINVAL;
                        }
                }
        }
        if (err) {
                mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
                               "Set only 'eth' or 'ib' for both ports "
                               "(should be the same)\n");
                goto out;
        }

        mlx4_do_sense_ports(mdev, new_types, types);

        err = mlx4_check_port_params(mdev, new_types);
        if (err)
                goto out;

        /* We are about to apply the changes after the configuration
         * was verified, no need to remember the temporary types
         * any more */
        for (i = 0; i < mdev->caps.num_ports; i++)
                priv->port[i + 1].tmp_type = 0;

        err = mlx4_change_port_types(mdev, new_types);

out:
        mlx4_start_sense(mdev);
        mutex_unlock(&priv->port_mutex);
        return err ? err : count;
}

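/*
 * MTU values as encoded by the IBTA spec: the wire format carries a
 * small enum (1-5) rather than the MTU byte count itself.
 */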
enum ibta_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
        switch (mtu) {
        case 256:  return IB_MTU_256;
        case 512:  return IB_MTU_512;
        case 1024: return IB_MTU_1024;
        case 2048: return IB_MTU_2048;
        case 4096: return IB_MTU_4096;
        default: return -1;
        }
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default: return -1;
        }
}

static ssize_t show_port_ib_mtu(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_mtu_attr);
        struct mlx4_dev *mdev = info->dev;

        if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
                mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

        sprintf(buf, "%d\n",
                        ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
        return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_mtu_attr);
        struct mlx4_dev *mdev = info->dev;
        struct mlx4_priv *priv = mlx4_priv(mdev);
        int err, port, mtu, ibta_mtu = -1;

        if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
                mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
                return -EINVAL;
        }

        mtu = (int) simple_strtol(buf, NULL, 0);
        ibta_mtu = int_to_ibta_mtu(mtu);

        if (ibta_mtu < 0) {
                mlx4_err(mdev, "%s is an invalid IBTA mtu\n", buf);
                return -EINVAL;
        }

        mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

        mlx4_stop_sense(mdev);
        mutex_lock(&priv->port_mutex);
        mlx4_unregister_device(mdev);
        for (port = 1; port <= mdev->caps.num_ports; port++) {
                mlx4_CLOSE_PORT(mdev, port);
                err = mlx4_SET_PORT(mdev, port, -1);
                if (err) {
                        mlx4_err(mdev, "Failed to set port %d, "
                                      "aborting\n", port);
                        goto err_set_port;
                }
        }
        err = mlx4_register_device(mdev);
err_set_port:
        mutex_unlock(&priv->port_mutex);
        mlx4_start_sense(mdev);
        return err ? err : count;
}

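/*
 * Pull the firmware into ICM: allocate the FW area, map it with
 * MAP_FA, then start the firmware with RUN_FW.
 */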
static int mlx4_load_fw(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
                                         GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.fw_icm) {
                mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
                return -ENOMEM;
        }

        err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
        if (err) {
                mlx4_err(dev, "MAP_FA command failed, aborting.\n");
                goto err_free;
        }

        err = mlx4_RUN_FW(dev);
        if (err) {
                mlx4_err(dev, "RUN_FW command failed, aborting.\n");
                goto err_unmap_fa;
        }

        return 0;

err_unmap_fa:
        mlx4_UNMAP_FA(dev);

err_free:
        mlx4_free_icm(dev, priv->fw.fw_icm, 0);
        return err;
}

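/*
 * The four cMPT sub-tables (QP, SRQ, CQ, EQ) share one ICM region;
 * each type's table starts at cmpt_base plus its MLX4_CMPT_TYPE_*
 * index times cmpt_entry_sz, shifted by MLX4_CMPT_SHIFT.
 */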
1018 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
1019                                 int cmpt_entry_sz)
1020 {
1021         struct mlx4_priv *priv = mlx4_priv(dev);
1022         int err;
1023         int num_eqs;
1024
1025         err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
1026                                   cmpt_base +
1027                                   ((u64) (MLX4_CMPT_TYPE_QP *
1028                                           cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1029                                   cmpt_entry_sz, dev->caps.num_qps,
1030                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1031                                   0, 0);
1032         if (err)
1033                 goto err;
1034
1035         err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
1036                                   cmpt_base +
1037                                   ((u64) (MLX4_CMPT_TYPE_SRQ *
1038                                           cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1039                                   cmpt_entry_sz, dev->caps.num_srqs,
1040                                   dev->caps.reserved_srqs, 0, 0);
1041         if (err)
1042                 goto err_qp;
1043
1044         err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
1045                                   cmpt_base +
1046                                   ((u64) (MLX4_CMPT_TYPE_CQ *
1047                                           cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1048                                   cmpt_entry_sz, dev->caps.num_cqs,
1049                                   dev->caps.reserved_cqs, 0, 0);
1050         if (err)
1051                 goto err_srq;
1052
1053         num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
1054                   dev->caps.num_eqs;
1055         err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
1056                                   cmpt_base +
1057                                   ((u64) (MLX4_CMPT_TYPE_EQ *
1058                                           cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1059                                   cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
1060         if (err)
1061                 goto err_cq;
1062
1063         return 0;
1064
1065 err_cq:
1066         mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1067
1068 err_srq:
1069         mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1070
1071 err_qp:
1072         mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1073
1074 err:
1075         return err;
1076 }
1077
1078 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1079                          struct mlx4_init_hca_param *init_hca, u64 icm_size)
1080 {
1081         struct mlx4_priv *priv = mlx4_priv(dev);
1082         u64 aux_pages;
1083         int num_eqs;
1084         int err;
1085
1086         err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1087         if (err) {
1088                 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
1089                 return err;
1090         }
1091
1092         mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
1093                  (unsigned long long) icm_size >> 10,
1094                  (unsigned long long) aux_pages << 2);
1095
1096         priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1097                                           GFP_HIGHUSER | __GFP_NOWARN, 0);
1098         if (!priv->fw.aux_icm) {
1099                 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
1100                 return -ENOMEM;
1101         }
1102
1103         err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1104         if (err) {
1105                 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
1106                 goto err_free_aux;
1107         }
1108
1109         err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1110         if (err) {
1111                 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
1112                 goto err_unmap_aux;
1113         }
1114
1115
1116         num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
1117                    dev->caps.num_eqs;
1118         err = mlx4_init_icm_table(dev, &priv->eq_table.table,
1119                                   init_hca->eqc_base, dev_cap->eqc_entry_sz,
1120                                   num_eqs, num_eqs, 0, 0);
1121         if (err) {
1122                 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
1123                 goto err_unmap_cmpt;
1124         }
1125
1126         /*
1127          * Reserved MTT entries must be aligned up to a cacheline
1128          * boundary, since the FW will write to them, while the driver
1129          * writes to all other MTT entries. (The variable
1130          * dev->caps.mtt_entry_sz below is really the MTT segment
1131          * size, not the raw entry size)
1132          */
1133         dev->caps.reserved_mtts =
1134                 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
1135                       dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
1136
1137         err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1138                                   init_hca->mtt_base,
1139                                   dev->caps.mtt_entry_sz,
1140                                   dev->caps.num_mtts,
1141                                   dev->caps.reserved_mtts, 1, 0);
1142         if (err) {
1143                 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
1144                 goto err_unmap_eq;
1145         }
1146
1147         err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1148                                   init_hca->dmpt_base,
1149                                   dev_cap->dmpt_entry_sz,
1150                                   dev->caps.num_mpts,
1151                                   dev->caps.reserved_mrws, 1, 1);
1152         if (err) {
1153                 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
1154                 goto err_unmap_mtt;
1155         }
1156
1157         err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1158                                   init_hca->qpc_base,
1159                                   dev_cap->qpc_entry_sz,
1160                                   dev->caps.num_qps,
1161                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1162                                   0, 0);
1163         if (err) {
1164                 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
1165                 goto err_unmap_dmpt;
1166         }
1167
1168         err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1169                                   init_hca->auxc_base,
1170                                   dev_cap->aux_entry_sz,
1171                                   dev->caps.num_qps,
1172                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1173                                   0, 0);
1174         if (err) {
1175                 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
1176                 goto err_unmap_qp;
1177         }
1178
1179         err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1180                                   init_hca->altc_base,
1181                                   dev_cap->altc_entry_sz,
1182                                   dev->caps.num_qps,
1183                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1184                                   0, 0);
1185         if (err) {
1186                 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
1187                 goto err_unmap_auxc;
1188         }
1189
1190         err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1191                                   init_hca->rdmarc_base,
1192                                   dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1193                                   dev->caps.num_qps,
1194                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1195                                   0, 0);
1196         if (err) {
1197                 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1198                 goto err_unmap_altc;
1199         }
1200
1201         err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1202                                   init_hca->cqc_base,
1203                                   dev_cap->cqc_entry_sz,
1204                                   dev->caps.num_cqs,
1205                                   dev->caps.reserved_cqs, 0, 0);
1206         if (err) {
1207                 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
1208                 goto err_unmap_rdmarc;
1209         }
1210
1211         err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1212                                   init_hca->srqc_base,
1213                                   dev_cap->srq_entry_sz,
1214                                   dev->caps.num_srqs,
1215                                   dev->caps.reserved_srqs, 0, 0);
1216         if (err) {
1217                 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
1218                 goto err_unmap_cq;
1219         }
1220
1221         /*
1222          * For flow steering device managed mode it is required to use
1223          * mlx4_init_icm_table. For B0 steering mode it's not strictly
1224          * required, but for simplicity just map the whole multicast
1225          * group table now.  The table isn't very big and it's a lot
1226          * easier than trying to track ref counts.
1227          */
1228         err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1229                                   init_hca->mc_base,
1230                                   mlx4_get_mgm_entry_size(dev),
1231                                   dev->caps.num_mgms + dev->caps.num_amgms,
1232                                   dev->caps.num_mgms + dev->caps.num_amgms,
1233                                   0, 0);
1234         if (err) {
1235                 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
1236                 goto err_unmap_srq;
1237         }
1238
1239         return 0;
1240
1241 err_unmap_srq:
1242         mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1243
1244 err_unmap_cq:
1245         mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1246
1247 err_unmap_rdmarc:
1248         mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1249
1250 err_unmap_altc:
1251         mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1252
1253 err_unmap_auxc:
1254         mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1255
1256 err_unmap_qp:
1257         mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1258
1259 err_unmap_dmpt:
1260         mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1261
1262 err_unmap_mtt:
1263         mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1264
1265 err_unmap_eq:
1266         mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1267
1268 err_unmap_cmpt:
1269         mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1270         mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1271         mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1272         mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1273
1274 err_unmap_aux:
1275         mlx4_UNMAP_ICM_AUX(dev);
1276
1277 err_free_aux:
1278         mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1279
1280         return err;
1281 }
1282
1283 static void mlx4_free_icms(struct mlx4_dev *dev)
1284 {
1285         struct mlx4_priv *priv = mlx4_priv(dev);
1286
1287         mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1288         mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1289         mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1290         mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1291         mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1292         mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1293         mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1294         mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1295         mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1296         mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1297         mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1298         mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1299         mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1300         mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1301
1302         mlx4_UNMAP_ICM_AUX(dev);
1303         mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1304 }
1305
1306 static void mlx4_slave_exit(struct mlx4_dev *dev)
1307 {
1308         struct mlx4_priv *priv = mlx4_priv(dev);
1309
1310         mutex_lock(&priv->cmd.slave_cmd_mutex);
1311         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
1312                 mlx4_warn(dev, "Failed to close slave function.\n");
1313         mutex_unlock(&priv->cmd.slave_cmd_mutex);
1314 }
1315
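/*
 * BAR 2 holds the UAR pages followed by the BlueFlame send registers,
 * so map_bf_area() skips the first num_uars pages and maps whatever is
 * left of the BAR write-combined.  Roughly:
 *
 *	BAR 2:  [ UAR 0 | UAR 1 | ... | UAR n-1 | BlueFlame registers ]
 *	         \-- num_uars << PAGE_SHIFT --/ \---- mapped WC ------/
 */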
1316 static int map_bf_area(struct mlx4_dev *dev)
1317 {
1318         struct mlx4_priv *priv = mlx4_priv(dev);
1319         resource_size_t bf_start;
1320         resource_size_t bf_len;
1321         int err = 0;
1322
1323         if (!dev->caps.bf_reg_size)
1324                 return -ENXIO;
1325
1326         bf_start = pci_resource_start(dev->pdev, 2) +
1327                         (dev->caps.num_uars << PAGE_SHIFT);
1328         bf_len = pci_resource_len(dev->pdev, 2) -
1329                         (dev->caps.num_uars << PAGE_SHIFT);
1330         priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1331         if (!priv->bf_mapping)
1332                 err = -ENOMEM;
1333
1334         return err;
1335 }
1336
1337 static void unmap_bf_area(struct mlx4_dev *dev)
1338 {
1339         if (mlx4_priv(dev)->bf_mapping)
1340                 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1341 }
1342
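/*
 * The internal clock is a 64-bit free-running counter exposed as two
 * 32-bit big-endian words that cannot be read atomically.  The loop
 * below uses the classic high-low-high technique: read the high word,
 * then the low word, then the high word again; if the two high reads
 * match, the low word did not wrap in between and the combined value is
 * consistent.  Ten attempts is plenty, since the low word wraps only
 * once every 2^32 cycles.
 */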
1343 cycle_t mlx4_read_clock(struct mlx4_dev *dev)
1344 {
1345         u32 clockhi, clocklo, clockhi1;
1346         cycle_t cycles;
1347         int i;
1348         struct mlx4_priv *priv = mlx4_priv(dev);
1349
1350         for (i = 0; i < 10; i++) {
1351                 clockhi = swab32(readl(priv->clock_mapping));
1352                 clocklo = swab32(readl(priv->clock_mapping + 4));
1353                 clockhi1 = swab32(readl(priv->clock_mapping));
1354                 if (clockhi == clockhi1)
1355                         break;
1356         }
1357
1358         cycles = (u64) clockhi << 32 | (u64) clocklo;
1359
1360         return cycles;
1361 }
1362 EXPORT_SYMBOL_GPL(mlx4_read_clock);
1363
1364
1365 static int map_internal_clock(struct mlx4_dev *dev)
1366 {
1367         struct mlx4_priv *priv = mlx4_priv(dev);
1368
1369         priv->clock_mapping = ioremap(pci_resource_start(dev->pdev,
1370                                 priv->fw.clock_bar) +
1371                                 priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1372
1373         if (!priv->clock_mapping)
1374                 return -ENOMEM;
1375
1376         return 0;
1377 }
1378
1379 static void unmap_internal_clock(struct mlx4_dev *dev)
1380 {
1381         struct mlx4_priv *priv = mlx4_priv(dev);
1382
1383         if (priv->clock_mapping)
1384                 iounmap(priv->clock_mapping);
1385 }
1386
1387 static void mlx4_close_hca(struct mlx4_dev *dev)
1388 {
1389         unmap_internal_clock(dev);
1390         unmap_bf_area(dev);
1391         if (mlx4_is_slave(dev))
1392                 mlx4_slave_exit(dev);
1393         else {
1394                 mlx4_CLOSE_HCA(dev, 0);
1395                 mlx4_free_icms(dev);
1396                 mlx4_UNMAP_FA(dev);
1397                 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
1398         }
1399 }
1400
1401 static int mlx4_init_slave(struct mlx4_dev *dev)
1402 {
1403         struct mlx4_priv *priv = mlx4_priv(dev);
1404         u64 dma = (u64) priv->mfunc.vhcr_dma;
1405         int num_of_reset_retries = NUM_OF_RESET_RETRIES;
1406         int ret_from_reset = 0;
1407         u32 slave_read;
1408         u32 cmd_channel_ver;
1409
1410         mutex_lock(&priv->cmd.slave_cmd_mutex);
1411         priv->cmd.max_cmds = 1;
1412         mlx4_warn(dev, "Sending reset\n");
1413         ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
1414                                        MLX4_COMM_TIME);
1415         /* If we are in the middle of an FLR, the slave will retry up to
1416          * NUM_OF_RESET_RETRIES times before giving up. */
1417         if (ret_from_reset) {
1418                 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
1419                         msleep(SLEEP_TIME_IN_RESET);
1420                         while (ret_from_reset && num_of_reset_retries) {
1421                                 mlx4_warn(dev, "slave is currently in the "
1422                                           "middle of FLR, retrying... "
1423                                           "(try num:%d)\n",
1424                                           (NUM_OF_RESET_RETRIES -
1425                                            num_of_reset_retries  + 1));
1426                                 ret_from_reset =
1427                                         mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
1428                                                       0, MLX4_COMM_TIME);
1429                                 num_of_reset_retries = num_of_reset_retries - 1;
1430                         }
1431                 } else
1432                         goto err;
1433         }
1434
1435         /* check the driver version - the slave I/F revision
1436          * must match the master's */
1437         slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
1438         cmd_channel_ver = mlx4_comm_get_version();
1439
1440         if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
1441                 MLX4_COMM_GET_IF_REV(slave_read)) {
1442                 mlx4_err(dev, "slave driver version is not supported"
1443                          " by the master\n");
1444                 goto err;
1445         }
1446
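        /*
         * Hand the 64-bit VHCR DMA address to the master 16 bits at a
         * time, since the comm-channel parameter field is only 16 bits
         * wide: VHCR0 carries bits 63:48, VHCR1 bits 47:32, VHCR2 bits
         * 31:16, and VHCR_EN carries the low 16 bits and arms the
         * channel.
         */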
1447         mlx4_warn(dev, "Sending vhcr0\n");
1448         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
1449                                                     MLX4_COMM_TIME))
1450                 goto err;
1451         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
1452                                                     MLX4_COMM_TIME))
1453                 goto err;
1454         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
1455                                                     MLX4_COMM_TIME))
1456                 goto err;
1457         if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
1458                 goto err;
1459
1460         mutex_unlock(&priv->cmd.slave_cmd_mutex);
1461         return 0;
1462
1463 err:
1464         mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
1465         mutex_unlock(&priv->cmd.slave_cmd_mutex);
1466         return -EIO;
1467 }
1468
1469 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
1470 {
1471         int i;
1472
1473         for (i = 1; i <= dev->caps.num_ports; i++) {
1474                 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
1475                         dev->caps.gid_table_len[i] =
1476                                 mlx4_get_slave_num_gids(dev, 0);
1477                 else
1478                         dev->caps.gid_table_len[i] = 1;
1479                 dev->caps.pkey_table_len[i] =
1480                         dev->phys_caps.pkey_phys_table_len[i] - 1;
1481         }
1482 }
1483
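/*
 * An MGM entry of size 2^i bytes starts with two 16-byte lines (entry
 * header and GID) and is followed by 4-byte QPN slots, so it can hold
 * 4 * ((1 << i) / 16 - 2) QPs.  For example, i = 10 (1 KB entries)
 * gives 4 * (64 - 2) = 248 QPs per entry.  Pick the smallest i in
 * [MLX4_MIN_MGM_LOG_ENTRY_SIZE, MLX4_MAX_MGM_LOG_ENTRY_SIZE] that fits
 * qp_per_entry, or return -1 if even the largest entry is too small.
 */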
1484 static int choose_log_fs_mgm_entry_size(int qp_per_entry)
1485 {
1486         int i;
1487
1488         for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
1489               i++) {
1490                 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
1491                         break;
1492         }
1493
1494         return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
1495 }
1496
1497 static void choose_steering_mode(struct mlx4_dev *dev,
1498                                  struct mlx4_dev_cap *dev_cap)
1499 {
1500         /* This is only valid for the integrated driver.  The newly ported
1501          * mlx4_core driver defaults to B0 steering mode, while the old
1502          * mlx4_en driver defaults to A0 steering mode.  high_rate_steer != 0
1503          * means A0 steering is on; the integration fix is to hard-code it. */
1504         high_rate_steer = 1;
1506
1507         if (high_rate_steer && !mlx4_is_mfunc(dev)) {
1508                 dev->caps.flags &= ~(MLX4_DEV_CAP_FLAG_VEP_MC_STEER |
1509                                      MLX4_DEV_CAP_FLAG_VEP_UC_STEER);
1510                 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_FS_EN;
1511         }
1512
1513         if (mlx4_log_num_mgm_entry_size == -1 &&
1514             dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1515             dev_cap->fs_log_max_ucast_qp_range_size == 0 &&
1516             (!mlx4_is_mfunc(dev) ||
1517              (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
1518             choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1519                 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1520                 dev->oper_log_mgm_entry_size =
1521                         choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
1522                 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1523                 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
1524         } else {
1525                 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
1526                     dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
1527                         dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
1528                 } else {
1530                         dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
1531
1532                         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
1533                             dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
1534                                 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
1535                                           "set to use B0 steering. Falling back to A0 steering mode.\n");
1536                 }
1537                 dev->oper_log_mgm_entry_size =
1538                         mlx4_log_num_mgm_entry_size > 0 ?
1539                         mlx4_log_num_mgm_entry_size :
1540                         MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
1541                 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
1542         }
1543         mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
1544                  "log_num_mgm_entry_size = %d\n",
1545                  mlx4_steering_mode_str(dev->caps.steering_mode),
1546                  dev->oper_log_mgm_entry_size, mlx4_log_num_mgm_entry_size);
1547 }
1548
1549 static int mlx4_init_hca(struct mlx4_dev *dev)
1550 {
1551         struct mlx4_priv          *priv = mlx4_priv(dev);
1552         struct mlx4_dev_cap        *dev_cap = NULL;
1553         struct mlx4_adapter        adapter;
1554         struct mlx4_mod_stat_cfg   mlx4_cfg;
1555         struct mlx4_profile        profile;
1556         struct mlx4_init_hca_param init_hca;
1557         u64 icm_size;
1558         int err;
1559
1560         if (!mlx4_is_slave(dev)) {
1561                 err = mlx4_QUERY_FW(dev);
1562                 if (err) {
1563                         if (err == -EACCES)
1564                                 mlx4_info(dev, "non-primary physical function, skipping.\n");
1565                         else
1566                                 mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
1567                         return err;
1568                 }
1569
1570                 err = mlx4_load_fw(dev);
1571                 if (err) {
1572                         mlx4_err(dev, "Failed to start FW, aborting.\n");
1573                         return err;
1574                 }
1575
1576                 mlx4_cfg.log_pg_sz_m = 1;
1577                 mlx4_cfg.log_pg_sz = 0;
1578                 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
1579                 if (err)
1580                         mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
1581
1582                 dev_cap = kzalloc(sizeof *dev_cap, GFP_KERNEL);
1583                 if (!dev_cap) {
1584                         mlx4_err(dev, "Failed to allocate memory for dev_cap\n");
1585                         err = -ENOMEM;
1586                         goto err_stop_fw;
1587                 }
1588
1589                 err = mlx4_dev_cap(dev, dev_cap);
1590                 if (err) {
1591                         mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
1592                         goto err_stop_fw;
1593                 }
1594
1595                 choose_steering_mode(dev, dev_cap);
1596
1597                 if (mlx4_is_master(dev))
1598                         mlx4_parav_master_pf_caps(dev);
1599
1600                 process_mod_param_profile(&profile);
1601                 if (dev->caps.steering_mode ==
1602                     MLX4_STEERING_MODE_DEVICE_MANAGED)
1603                         profile.num_mcg = MLX4_FS_NUM_MCG;
1604
1605                 icm_size = mlx4_make_profile(dev, &profile, dev_cap,
1606                                              &init_hca);
1607                 if ((long long) icm_size < 0) {
1608                         err = icm_size;
1609                         goto err_stop_fw;
1610                 }
1611
1612                 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
1613
1614                 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
1615                 init_hca.uar_page_sz = PAGE_SHIFT - 12;
1616
1617                 err = mlx4_init_icm(dev, dev_cap, &init_hca, icm_size);
1618                 if (err)
1619                         goto err_stop_fw;
1620
1621                 err = mlx4_INIT_HCA(dev, &init_hca);
1622                 if (err) {
1623                         mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
1624                         goto err_free_icm;
1625                 }
1626         } else {
1627                 err = mlx4_init_slave(dev);
1628                 if (err) {
1629                         mlx4_err(dev, "Failed to initialize slave\n");
1630                         return err;
1631                 }
1632
1633                 err = mlx4_slave_cap(dev);
1634                 if (err) {
1635                         mlx4_err(dev, "Failed to obtain slave caps\n");
1636                         goto err_close;
1637                 }
1638         }
1639
1640         if (map_bf_area(dev))
1641                 mlx4_dbg(dev, "Failed to map blue flame area\n");
1642
1643         /*
1644          * Read HCA frequency by QUERY_HCA command
1645          */
1646         if (dev->caps.cq_timestamp) {
1647                 memset(&init_hca, 0, sizeof(init_hca));
1648                 err = mlx4_QUERY_HCA(dev, &init_hca);
1649                 if (err) {
1650                         mlx4_err(dev, "QUERY_HCA command failed, disabling timestamping.\n");
1651                         dev->caps.cq_timestamp = 0;
1652                 } else
1653                         dev->caps.hca_core_clock = init_hca.hca_core_clock;
1654
1655                 /*
1656                  * In case we got HCA frequency 0 - disable timestamping
1657                  * to avoid dividing by zero
1658                  */
1659                 if (!dev->caps.hca_core_clock) {
1660                         dev->caps.cq_timestamp = 0;
1661                         mlx4_err(dev, "HCA frequency is 0. "
1662                                  "Timestamping is not supported.\n");
1663                 }
1664
1665                 /*
1666                  * Map internal clock, in case of failure disable timestamping
1667                  */
1668                 if (map_internal_clock(dev)) {
1669                         dev->caps.cq_timestamp = 0;
1670                         mlx4_err(dev, "Failed to map internal clock. "
1671                                  "Timestamping is not supported.\n");
1672                 }
1673         }
1674
1675         /* Only the master sets the ports; all the others inherit them. */
1676         if (!mlx4_is_slave(dev))
1677                 mlx4_set_port_mask(dev);
1678
1679         err = mlx4_QUERY_ADAPTER(dev, &adapter);
1680         if (err) {
1681                 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
1682                 goto unmap_bf;
1683         }
1684
1685         priv->eq_table.inta_pin = adapter.inta_pin;
1686         memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
1687
1688         if (!mlx4_is_slave(dev))
1689                 kfree(dev_cap);
1690
1691         return 0;
1692
1693 unmap_bf:
1694         unmap_internal_clock(dev);
1695         unmap_bf_area(dev);
1696
1697         if (mlx4_is_slave(dev)) {
1698                 kfree(dev->caps.qp0_tunnel);
1699                 kfree(dev->caps.qp0_proxy);
1700                 kfree(dev->caps.qp1_tunnel);
1701                 kfree(dev->caps.qp1_proxy);
1702         }
1703
1704 err_close:
1705         if (mlx4_is_slave(dev))
1706                 mlx4_slave_exit(dev);
1707         else
1708                 mlx4_CLOSE_HCA(dev, 0);
1709
1710 err_free_icm:
1711         if (!mlx4_is_slave(dev))
1712                 mlx4_free_icms(dev);
1713
1714 err_stop_fw:
1715         if (!mlx4_is_slave(dev)) {
1716                 mlx4_UNMAP_FA(dev);
1717                 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
1718                 kfree(dev_cap);   /* kfree(NULL) is a no-op */
1720         }
1721         return err;
1722 }
1723
1724 static int mlx4_init_counters_table(struct mlx4_dev *dev)
1725 {
1726         struct mlx4_priv *priv = mlx4_priv(dev);
1727         int res;
1728         int nent_pow2;
1729
1730         if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
1731                 return -ENOENT;
1732
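        /*
         * The bitmap allocator wants a power-of-two size, so round
         * max_counters up and mark the round-up slack at the top as
         * reserved.  E.g. with max_counters = 40: nent_pow2 = 64,
         * mask = 63, and the top 64 - 40 = 24 entries can never be
         * handed out.
         */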
1733         nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
1734         res = mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
1735                                 nent_pow2 - 1, 0,
1736                                 nent_pow2 - dev->caps.max_counters);
1737         if (res)
1738                 return res;
1739
1740         if (dev->caps.max_counters == dev->caps.max_basic_counters)
1741                 return 0;
1742
1743         res = mlx4_cmd(dev, MLX4_IF_STATE_EXTENDED, 0, 0,
1744                 MLX4_CMD_SET_IF_STAT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1745
1746         if (res)
1747                 mlx4_err(dev, "Failed to set extended counters (err=%d)\n",
1748                                 res);
1749         return res;
1751 }
1752
1753 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
1754 {
1755         if (!mlx4_is_slave(dev) &&
1756                 (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
1757                 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
1758 }
1759
1760 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
1761 {
1762         struct mlx4_priv *priv = mlx4_priv(dev);
1763
1764         if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
1765                 return -ENOENT;
1766
1767         *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
1768         if (*idx == -1)
1769                 return -ENOMEM;
1770
1771         return 0;
1772 }
1773
1774 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
1775 {
1776         u64 out_param;
1777         int err;
1778
1779         if (mlx4_is_mfunc(dev)) {
1780                 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
1781                                    RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
1782                                    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1783                 if (!err)
1784                         *idx = get_param_l(&out_param);
1785
1786                 return err;
1787         }
1788         return __mlx4_counter_alloc(dev, idx);
1789 }
1790 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
1791
1792 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1793 {
1794         mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
1796 }
1797
1798 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1799 {
1800         u64 in_param = 0;
1801
1802         if (mlx4_is_mfunc(dev)) {
1803                 set_param_l(&in_param, idx);
1804                 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
1805                          MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
1806                          MLX4_CMD_WRAPPED);
1807                 return;
1808         }
1809         __mlx4_counter_free(dev, idx);
1810 }
1811 EXPORT_SYMBOL_GPL(mlx4_counter_free);
1812
1813 static int mlx4_setup_hca(struct mlx4_dev *dev)
1814 {
1815         struct mlx4_priv *priv = mlx4_priv(dev);
1816         int err;
1817         int port;
1818         __be32 ib_port_default_caps;
1819
1820         err = mlx4_init_uar_table(dev);
1821         if (err) {
1822                 mlx4_err(dev, "Failed to initialize "
1823                          "user access region table (err=%d), aborting.\n",
1824                          err);
1825                 return err;
1826         }
1827
1828         err = mlx4_uar_alloc(dev, &priv->driver_uar);
1829         if (err) {
1830                 mlx4_err(dev, "Failed to allocate driver access region "
1831                          "(err=%d), aborting.\n", err);
1832                 goto err_uar_table_free;
1833         }
1834
1835         priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
1836         if (!priv->kar) {
1837                 mlx4_err(dev, "Couldn't map kernel access region, "
1838                          "aborting.\n");
1839                 err = -ENOMEM;
1840                 goto err_uar_free;
1841         }
1842
1843         err = mlx4_init_pd_table(dev);
1844         if (err) {
1845                 mlx4_err(dev, "Failed to initialize "
1846                          "protection domain table (err=%d), aborting.\n", err);
1847                 goto err_kar_unmap;
1848         }
1849
1850         err = mlx4_init_xrcd_table(dev);
1851         if (err) {
1852                 mlx4_err(dev, "Failed to initialize "
1853                          "reliable connection domain table (err=%d), "
1854                          "aborting.\n", err);
1855                 goto err_pd_table_free;
1856         }
1857
1858         err = mlx4_init_mr_table(dev);
1859         if (err) {
1860                 mlx4_err(dev, "Failed to initialize "
1861                          "memory region table (err=%d), aborting.\n", err);
1862                 goto err_xrcd_table_free;
1863         }
1864
1865         err = mlx4_init_eq_table(dev);
1866         if (err) {
1867                 mlx4_err(dev, "Failed to initialize "
1868                          "event queue table (err=%d), aborting.\n", err);
1869                 goto err_mr_table_free;
1870         }
1871
1872         err = mlx4_cmd_use_events(dev);
1873         if (err) {
1874                 mlx4_err(dev, "Failed to switch to event-driven "
1875                          "firmware commands (err=%d), aborting.\n", err);
1876                 goto err_eq_table_free;
1877         }
1878
1879         err = mlx4_NOP(dev);
1880         if (err) {
1881                 if (dev->flags & MLX4_FLAG_MSI_X) {
1882                         mlx4_warn(dev, "NOP command failed to generate MSI-X "
1883                                   "interrupt (IRQ %d).\n",
1884                                   priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1885                         mlx4_warn(dev, "Trying again without MSI-X.\n");
1886                 } else {
1887                         mlx4_err(dev, "NOP command failed to generate interrupt "
1888                                  "(IRQ %d), aborting.\n",
1889                                  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
1890                         mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
1891                 }
1892
1893                 goto err_cmd_poll;
1894         }
1895
1896         mlx4_dbg(dev, "NOP command IRQ test passed\n");
1897
1898         err = mlx4_init_cq_table(dev);
1899         if (err) {
1900                 mlx4_err(dev, "Failed to initialize "
1901                          "completion queue table (err=%d), aborting.\n", err);
1902                 goto err_cmd_poll;
1903         }
1904
1905         err = mlx4_init_srq_table(dev);
1906         if (err) {
1907                 mlx4_err(dev, "Failed to initialize "
1908                          "shared receive queue table (err=%d), aborting.\n",
1909                          err);
1910                 goto err_cq_table_free;
1911         }
1912
1913         err = mlx4_init_qp_table(dev);
1914         if (err) {
1915                 mlx4_err(dev, "Failed to initialize "
1916                          "queue pair table (err=%d), aborting.\n", err);
1917                 goto err_srq_table_free;
1918         }
1919
1920         if (!mlx4_is_slave(dev)) {
1921                 err = mlx4_init_mcg_table(dev);
1922                 if (err) {
1923                         mlx4_err(dev, "Failed to initialize "
1924                                  "multicast group table (err=%d), aborting.\n",
1925                                  err);
1926                         goto err_qp_table_free;
1927                 }
1928
1929                 err = mlx4_init_counters_table(dev);
1930                 if (err && err != -ENOENT) {
1931                         mlx4_err(dev, "Failed to initialize counters table (err=%d), "
1932                                  "aborting.\n", err);
1933                         goto err_mcg_table_free;
1934                 }
1935
1936                 for (port = 1; port <= dev->caps.num_ports; port++) {
1937                         ib_port_default_caps = 0;
1938                         err = mlx4_get_port_ib_caps(dev, port,
1939                                                     &ib_port_default_caps);
1940                         if (err)
1941                                 mlx4_warn(dev, "failed to get port %d default "
1942                                           "ib capabilities (%d). Continuing "
1943                                           "with caps = 0\n", port, err);
1944                         dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
1945
1946                         /* initialize per-slave default ib port capabilities */
1947                         if (mlx4_is_master(dev)) {
1948                                 int i;
1949                                 for (i = 0; i < dev->num_slaves; i++) {
1950                                         if (i == mlx4_master_func_num(dev))
1951                                                 continue;
1952                                         priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
1953                                                         ib_port_default_caps;
1954                                 }
1955                         }
1956
1957                         if (mlx4_is_mfunc(dev))
1958                                 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
1959                         else
1960                                 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
1961
1962                         err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
1963                                             dev->caps.pkey_table_len[port] : -1);
1964                         if (err) {
1965                                 mlx4_err(dev, "Failed to set port %d (err=%d), "
1966                                          "aborting\n", port, err);
1967                                 goto err_counters_table_free;
1968                         }
1969                 }
1970         }
1971
1972         return 0;
1973
1974 err_counters_table_free:
1975         mlx4_cleanup_counters_table(dev);
1976
1977 err_mcg_table_free:
1978         mlx4_cleanup_mcg_table(dev);
1979
1980 err_qp_table_free:
1981         mlx4_cleanup_qp_table(dev);
1982
1983 err_srq_table_free:
1984         mlx4_cleanup_srq_table(dev);
1985
1986 err_cq_table_free:
1987         mlx4_cleanup_cq_table(dev);
1988
1989 err_cmd_poll:
1990         mlx4_cmd_use_polling(dev);
1991
1992 err_eq_table_free:
1993         mlx4_cleanup_eq_table(dev);
1994
1995 err_mr_table_free:
1996         mlx4_cleanup_mr_table(dev);
1997
1998 err_xrcd_table_free:
1999         mlx4_cleanup_xrcd_table(dev);
2000
2001 err_pd_table_free:
2002         mlx4_cleanup_pd_table(dev);
2003
2004 err_kar_unmap:
2005         iounmap(priv->kar);
2006
2007 err_uar_free:
2008         mlx4_uar_free(dev, &priv->driver_uar);
2009
2010 err_uar_table_free:
2011         mlx4_cleanup_uar_table(dev);
2012         return err;
2013 }
2014
2015 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2016 {
2017         struct mlx4_priv *priv = mlx4_priv(dev);
2018         struct msix_entry *entries;
2019         int nreq = min_t(int, dev->caps.num_ports *
2020                          min_t(int, num_possible_cpus() + 1, MAX_MSIX_P_PORT)
2021                                 + MSIX_LEGACY_SZ, MAX_MSIX);
2022         int err;
2023         int i;
2024
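        /*
         * Request one completion vector per possible CPU (plus one) per
         * port, capped at MAX_MSIX_P_PORT per port, plus MSIX_LEGACY_SZ
         * always-shared vectors, never exceeding MAX_MSIX in total; the
         * request is clamped again below to the EQs the device actually
         * has free.
         */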
2025         if (msi_x) {
2026                 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
2027                              nreq);
2028
2029                 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
2030                 if (!entries)
2031                         goto no_msi;
2032
2033                 for (i = 0; i < nreq; ++i)
2034                         entries[i].entry = i;
2035
2036         retry:
2037                 err = pci_enable_msix(dev->pdev, entries, nreq);
2038                 if (err) {
2039                         /* Try again if at least 2 vectors are available */
2040                         if (err > 1) {
2041                                 mlx4_info(dev, "Requested %d vectors, "
2042                                           "but only %d MSI-X vectors available, "
2043                                           "trying again\n", nreq, err);
2044                                 nreq = err;
2045                                 goto retry;
2046                         }
2047                         kfree(entries);
2048                         goto no_msi;
2049                 }
2050
2051                 if (nreq <
2052                     MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
2053                         /* Working in legacy mode, all EQs shared */
2054                         dev->caps.comp_pool           = 0;
2055                         dev->caps.num_comp_vectors = nreq - 1;
2056                 } else {
2057                         dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
2058                         dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
2059                 }
2060                 for (i = 0; i < nreq; ++i)
2061                         priv->eq_table.eq[i].irq = entries[i].vector;
2062
2063                 dev->flags |= MLX4_FLAG_MSI_X;
2064
2065                 kfree(entries);
2066                 return;
2067         }
2068
2069 no_msi:
2070         dev->caps.num_comp_vectors = 1;
2071         dev->caps.comp_pool        = 0;
2072
2073         for (i = 0; i < 2; ++i)
2074                 priv->eq_table.eq[i].irq = dev->pdev->irq;
2075 }
2076
2077 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2078 {
2079         struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
2080         int err = 0;
2081
2082         info->dev = dev;
2083         info->port = port;
2084         if (!mlx4_is_slave(dev)) {
2085                 mlx4_init_mac_table(dev, &info->mac_table);
2086                 mlx4_init_vlan_table(dev, &info->vlan_table);
2087                 info->base_qpn = mlx4_get_base_qpn(dev, port);
2088         }
2089
2090         sprintf(info->dev_name, "mlx4_port%d", port);
2091         info->port_attr.attr.name = info->dev_name;
2092         if (mlx4_is_mfunc(dev))
2093                 info->port_attr.attr.mode = S_IRUGO;
2094         else {
2095                 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
2096                 info->port_attr.store     = set_port_type;
2097         }
2098         info->port_attr.show      = show_port_type;
2099         sysfs_attr_init(&info->port_attr.attr);
2100
2101         err = device_create_file(&dev->pdev->dev, &info->port_attr);
2102         if (err) {
2103                 mlx4_err(dev, "Failed to create file for port %d\n", port);
2104                 info->port = -1;
2105         }
2106
2107         sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
2108         info->port_mtu_attr.attr.name = info->dev_mtu_name;
2109         if (mlx4_is_mfunc(dev))
2110                 info->port_mtu_attr.attr.mode = S_IRUGO;
2111         else {
2112                 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
2113                 info->port_mtu_attr.store     = set_port_ib_mtu;
2114         }
2115         info->port_mtu_attr.show      = show_port_ib_mtu;
2116         sysfs_attr_init(&info->port_mtu_attr.attr);
2117
2118         err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
2119         if (err) {
2120                 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
2121                 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
2122                 info->port = -1;
2123         }
2124
2125         return err;
2126 }
2127
2128 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
2129 {
2130         if (info->port < 0)
2131                 return;
2132
2133         device_remove_file(&info->dev->pdev->dev, &info->port_attr);
2134         device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
2135 }
2136
2137 static int mlx4_init_steering(struct mlx4_dev *dev)
2138 {
2139         struct mlx4_priv *priv = mlx4_priv(dev);
2140         int num_entries = dev->caps.num_ports;
2141         int i, j;
2142
2143         priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
2144         if (!priv->steer)
2145                 return -ENOMEM;
2146
2147         for (i = 0; i < num_entries; i++)
2148                 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2149                         INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
2150                         INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
2151                 }
2152         return 0;
2153 }
2154
2155 static void mlx4_clear_steering(struct mlx4_dev *dev)
2156 {
2157         struct mlx4_priv *priv = mlx4_priv(dev);
2158         struct mlx4_steer_index *entry, *tmp_entry;
2159         struct mlx4_promisc_qp *pqp, *tmp_pqp;
2160         int num_entries = dev->caps.num_ports;
2161         int i, j;
2162
2163         for (i = 0; i < num_entries; i++) {
2164                 for (j = 0; j < MLX4_NUM_STEERS; j++) {
2165                         list_for_each_entry_safe(pqp, tmp_pqp,
2166                                                  &priv->steer[i].promisc_qps[j],
2167                                                  list) {
2168                                 list_del(&pqp->list);
2169                                 kfree(pqp);
2170                         }
2171                         list_for_each_entry_safe(entry, tmp_entry,
2172                                                  &priv->steer[i].steer_entries[j],
2173                                                  list) {
2174                                 list_del(&entry->list);
2175                                 list_for_each_entry_safe(pqp, tmp_pqp,
2176                                                          &entry->duplicates,
2177                                                          list) {
2178                                         list_del(&pqp->list);
2179                                         kfree(pqp);
2180                                 }
2181                                 kfree(entry);
2182                         }
2183                 }
2184         }
2185         kfree(priv->steer);
2186 }
2187
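/*
 * devfn packs the PCI slot number in its upper five bits and the
 * function number in its lower three, so slot * 8 + function yields a
 * flat function index.  This matters for SR-IOV, where the VFs of one
 * adapter can spill past function 7 into the following slot numbers.
 */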
2188 static int extended_func_num(struct pci_dev *pdev)
2189 {
2190         return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
2191 }
2192
2193 #define MLX4_OWNER_BASE 0x8069c
2194 #define MLX4_OWNER_SIZE 4
2195
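/*
 * The ownership dword in BAR 0 behaves as a hardware semaphore: a read
 * that returns 0 atomically claims the device for this function, while
 * a non-zero read means another PF already owns it and this probe must
 * back off.  Ownership is released by writing 0; the msleep() afterwards
 * gives the firmware time to notice.
 */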
2196 static int mlx4_get_ownership(struct mlx4_dev *dev)
2197 {
2198         void __iomem *owner;
2199         u32 ret;
2200
2201         if (pci_channel_offline(dev->pdev))
2202                 return -EIO;
2203
2204         owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
2205                         MLX4_OWNER_SIZE);
2206         if (!owner) {
2207                 mlx4_err(dev, "Failed to obtain ownership bit\n");
2208                 return -ENOMEM;
2209         }
2210
2211         ret = readl(owner);
2212         iounmap(owner);
2213         return (int) !!ret;
2214 }
2215
2216 static void mlx4_free_ownership(struct mlx4_dev *dev)
2217 {
2218         void __iomem *owner;
2219
2220         if (pci_channel_offline(dev->pdev))
2221                 return;
2222
2223         owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
2224                         MLX4_OWNER_SIZE);
2225         if (!owner) {
2226                 mlx4_err(dev, "Failed to obtain ownership bit\n");
2227                 return;
2228         }
2229         writel(0, owner);
2230         msleep(1000);
2231         iounmap(owner);
2232 }
2233
2234 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2235 {
2236         struct mlx4_priv *priv;
2237         struct mlx4_dev *dev;
2238         int err;
2239         int port;
2240
2241         pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
2242
2243         err = pci_enable_device(pdev);
2244         if (err) {
2245                 dev_err(&pdev->dev, "Cannot enable PCI device, "
2246                         "aborting.\n");
2247                 return err;
2248         }
2249         if (num_vfs > MLX4_MAX_NUM_VF) {
2250                 dev_err(&pdev->dev, "There are more VFs (%d) than allowed (%d)\n",
2251                         num_vfs, MLX4_MAX_NUM_VF);
2252                 return -EINVAL;
2253         }
2254
2255         if (num_vfs < 0) {
2256                 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
2257                 return -EINVAL;
2258         }
2259         /*
2260          * Check for BARs.
2261          */
2262         if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
2263             !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2264                 dev_err(&pdev->dev, "Missing DCS, aborting. "
2265                         "(driver_data: 0x%x, pci_resource_flags(pdev, 0): 0x%x)\n",
2266                         pci_dev_data, pci_resource_flags(pdev, 0));
2267                 err = -ENODEV;
2268                 goto err_disable_pdev;
2269         }
2270         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
2271                 dev_err(&pdev->dev, "Missing UAR, aborting.\n");
2272                 err = -ENODEV;
2273                 goto err_disable_pdev;
2274         }
2275
2276         err = pci_request_regions(pdev, DRV_NAME);
2277         if (err) {
2278                 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
2279                 goto err_disable_pdev;
2280         }
2281
2282         pci_set_master(pdev);
2283
2284         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2285         if (err) {
2286                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
2287                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2288                 if (err) {
2289                         dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
2290                         goto err_release_regions;
2291                 }
2292         }
2293         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2294         if (err) {
2295                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
2296                          "consistent PCI DMA mask.\n");
2297                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2298                 if (err) {
2299                         dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
2300                                 "aborting.\n");
2301                         goto err_release_regions;
2302                 }
2303         }
2304
2305         /* Allow large DMA segments, up to the firmware limit of 1 GB */
2306         dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
2307
2308         priv = kzalloc(sizeof *priv, GFP_KERNEL);
2309         if (!priv) {
2310                 dev_err(&pdev->dev, "Device struct alloc failed, "
2311                         "aborting.\n");
2312                 err = -ENOMEM;
2313                 goto err_release_regions;
2314         }
2315
2316         dev       = &priv->dev;
2317         dev->pdev = pdev;
2318         INIT_LIST_HEAD(&priv->ctx_list);
2319         spin_lock_init(&priv->ctx_lock);
2320
2321         mutex_init(&priv->port_mutex);
2322
2323         INIT_LIST_HEAD(&priv->pgdir_list);
2324         mutex_init(&priv->pgdir_mutex);
2325
2326         INIT_LIST_HEAD(&priv->bf_list);
2327         mutex_init(&priv->bf_mutex);
2328
2329         dev->rev_id = pdev->revision;
2330         dev->numa_node = dev_to_node(&pdev->dev);
2331         /* Detect if this device is a virtual function */
2332         if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
2333                 /* When acting as pf, we normally skip vfs unless explicitly
2334                  * requested to probe them. */
2335                 if (num_vfs && extended_func_num(pdev) > probe_vf) {
2336                         mlx4_warn(dev, "Skipping virtual function:%d\n",
2337                                                 extended_func_num(pdev));
2338                         err = -ENODEV;
2339                         goto err_free_dev;
2340                 }
2341                 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
2342                 dev->flags |= MLX4_FLAG_SLAVE;
2343         } else {
2344                 /* We reset the device and enable SRIOV only for physical
2345                  * devices.  Try to claim ownership on the device;
2346                  * if already taken, skip -- do not allow multiple PFs */
2347                 err = mlx4_get_ownership(dev);
2348                 if (err) {
2349                         if (err < 0)
2350                                 goto err_free_dev;
2351                         else {
2352                                 mlx4_warn(dev, "Multiple PFs not yet supported."
2353                                           " Skipping PF.\n");
2354                                 err = -EINVAL;
2355                                 goto err_free_dev;
2356                         }
2357                 }
2358
2359                 if (num_vfs) {
2360                         mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
2361                         err = pci_enable_sriov(pdev, num_vfs);
2362                         if (err) {
2363                                 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2364                                          err);
2365                                 err = 0;
2366                         } else {
2367                                 mlx4_warn(dev, "Running in master mode\n");
2368                                 dev->flags |= MLX4_FLAG_SRIOV |
2369                                               MLX4_FLAG_MASTER;
2370                                 dev->num_vfs = num_vfs;
2371                         }
2372                 }
2373
2374                 atomic_set(&priv->opreq_count, 0);
2375                 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
2376
2377                 /*
2378                  * Now reset the HCA before we touch the PCI capabilities or
2379                  * attempt a firmware command, since a boot ROM may have left
2380                  * the HCA in an undefined state.
2381                  */
2382                 err = mlx4_reset(dev);
2383                 if (err) {
2384                         mlx4_err(dev, "Failed to reset HCA, aborting.\n");
2385                         goto err_sriov;
2386                 }
2387         }
2388
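        /*
         * If mlx4_init_hca() below finds that this is not the primary
         * physical function (QUERY_FW returns -EACCES), the device is
         * flipped into slave mode and control jumps back here to redo
         * the command interface setup for that mode.
         */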
2389 slave_start:
2390         err = mlx4_cmd_init(dev);
2391         if (err) {
2392                 mlx4_err(dev, "Failed to init command interface, aborting.\n");
2393                 goto err_sriov;
2394         }
2395
2396         /* In slave functions, the communication channel must be initialized
2397          * before posting commands. Also, init num_slaves before calling
2398          * mlx4_init_hca */
2399         if (mlx4_is_mfunc(dev)) {
2400                 if (mlx4_is_master(dev))
2401                         dev->num_slaves = MLX4_MAX_NUM_SLAVES;
2402                 else {
2403                         dev->num_slaves = 0;
2404                         err = mlx4_multi_func_init(dev);
2405                         if (err) {
2406                                 mlx4_err(dev, "Failed to init slave mfunc"
2407                                          " interface, aborting.\n");
2408                                 goto err_cmd;
2409                         }
2410                 }
2411         }
2412
2413         err = mlx4_init_hca(dev);
2414         if (err) {
2415                 if (err == -EACCES) {
2416                         /* Not primary Physical function
2417                          * Running in slave mode */
2418                         mlx4_cmd_cleanup(dev);
2419                         dev->flags |= MLX4_FLAG_SLAVE;
2420                         dev->flags &= ~MLX4_FLAG_MASTER;
2421                         goto slave_start;
2422                 } else
2423                         goto err_mfunc;
2424         }
2425
2426         /* In master functions, the communication channel must be initialized
2427          * after obtaining its address from fw */
2428         if (mlx4_is_master(dev)) {
2429                 err = mlx4_multi_func_init(dev);
2430                 if (err) {
2431                         mlx4_err(dev, "Failed to init master mfunc "
2432                                  "interface, aborting.\n");
2433                         goto err_close;
2434                 }
2435         }
2436
2437         err = mlx4_alloc_eq_table(dev);
2438         if (err)
2439                 goto err_master_mfunc;
2440
2441         priv->msix_ctl.pool_bm = 0;
2442         mutex_init(&priv->msix_ctl.pool_lock);
2443
2444         mlx4_enable_msi_x(dev);
2445         if ((mlx4_is_mfunc(dev)) &&
2446             !(dev->flags & MLX4_FLAG_MSI_X)) {
2447                 err = -ENOSYS;
2448                 mlx4_err(dev, "INTx is not supported in multi-function mode, "
2449                          "aborting.\n");
2450                 goto err_free_eq;
2451         }
2452
2453         if (!mlx4_is_slave(dev)) {
2454                 err = mlx4_init_steering(dev);
2455                 if (err)
2456                         goto err_free_eq;
2457         }
2458
2459         err = mlx4_setup_hca(dev);
2460         if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
2461             !mlx4_is_mfunc(dev)) {
2462                 dev->flags &= ~MLX4_FLAG_MSI_X;
2463                 dev->caps.num_comp_vectors = 1;
2464                 dev->caps.comp_pool        = 0;
2465                 pci_disable_msix(pdev);
2466                 err = mlx4_setup_hca(dev);
2467         }
2468
2469         if (err)
2470                 goto err_steer;
2471
2472         mlx4_init_quotas(dev);
2473
2474         for (port = 1; port <= dev->caps.num_ports; port++) {
2475                 err = mlx4_init_port_info(dev, port);
2476                 if (err)
2477                         goto err_port;
2478         }
2479
2480         err = mlx4_register_device(dev);
2481         if (err)
2482                 goto err_port;
2483
2484         err = mlx4_sense_init(dev);
2485         if (err)
2486                 goto err_port;
2487
2488         mlx4_start_sense(dev);
2489
2490         priv->pci_dev_data = pci_dev_data;
2491         pci_set_drvdata(pdev, dev);
2492
2493         return 0;
2494
2495 err_port:
2496         for (--port; port >= 1; --port)
2497                 mlx4_cleanup_port_info(&priv->port[port]);
2498
2499         mlx4_cleanup_counters_table(dev);
2500         mlx4_cleanup_mcg_table(dev);
2501         mlx4_cleanup_qp_table(dev);
2502         mlx4_cleanup_srq_table(dev);
2503         mlx4_cleanup_cq_table(dev);
2504         mlx4_cmd_use_polling(dev);
2505         mlx4_cleanup_eq_table(dev);
2506         mlx4_cleanup_mr_table(dev);
2507         mlx4_cleanup_xrcd_table(dev);
2508         mlx4_cleanup_pd_table(dev);
2509         mlx4_cleanup_uar_table(dev);
2510
2511 err_steer:
2512         if (!mlx4_is_slave(dev))
2513                 mlx4_clear_steering(dev);
2514
2515 err_free_eq:
2516         mlx4_free_eq_table(dev);
2517
2518 err_master_mfunc:
2519         if (mlx4_is_master(dev))
2520                 mlx4_multi_func_cleanup(dev);
2521
2522         if (mlx4_is_slave(dev)) {
2523                 kfree(dev->caps.qp0_tunnel);
2524                 kfree(dev->caps.qp0_proxy);
2525                 kfree(dev->caps.qp1_tunnel);
2526                 kfree(dev->caps.qp1_proxy);
2527         }
2528
2529 err_close:
2530         if (dev->flags & MLX4_FLAG_MSI_X)
2531                 pci_disable_msix(pdev);
2532
2533         mlx4_close_hca(dev);
2534
2535 err_mfunc:
2536         if (mlx4_is_slave(dev))
2537                 mlx4_multi_func_cleanup(dev);
2538
2539 err_cmd:
2540         mlx4_cmd_cleanup(dev);
2541
2542 err_sriov:
2543         if (dev->flags & MLX4_FLAG_SRIOV)
2544                 pci_disable_sriov(pdev);
2545
2546         if (!mlx4_is_slave(dev))
2547                 mlx4_free_ownership(dev);
2548
2549 err_free_dev:
2550         kfree(priv);
2551
2552 err_release_regions:
2553         pci_release_regions(pdev);
2554
2555 err_disable_pdev:
2556         pci_disable_device(pdev);
2557         pci_set_drvdata(pdev, NULL);
2558         return err;
2559 }
2560
2561 static int __devinit mlx4_init_one(struct pci_dev *pdev,
2562                                    const struct pci_device_id *id)
2563 {
2564         printk_once(KERN_INFO "%s", mlx4_version);
2565
2566         return __mlx4_init_one(pdev, id->driver_data);
2567 }
2568
2569 static void mlx4_remove_one(struct pci_dev *pdev)
2570 {
2571         struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
2572         struct mlx4_priv *priv = mlx4_priv(dev);
2573         int p;
2574
2575         if (dev) {
2576                 /* In SR-IOV mode it is not allowed to unload the PF's
2577                  * driver while there are active VFs. */
2578                 if (mlx4_is_master(dev)) {
2579                         if (mlx4_how_many_lives_vf(dev))
2580                                 mlx4_err(dev, "Removing PF when there are assigned VFs!\n");
2581                 }
2582                 mlx4_sense_cleanup(dev);
2583                 mlx4_unregister_device(dev);
2584
2585                 for (p = 1; p <= dev->caps.num_ports; p++) {
2586                         mlx4_cleanup_port_info(&priv->port[p]);
2587                         mlx4_CLOSE_PORT(dev, p);
2588                 }
2589
2590                 if (mlx4_is_master(dev))
2591                         mlx4_free_resource_tracker(dev,
2592                                                    RES_TR_FREE_SLAVES_ONLY);
2593
2594                 mlx4_cleanup_counters_table(dev);
2595                 mlx4_cleanup_mcg_table(dev);
2596                 mlx4_cleanup_qp_table(dev);
2597                 mlx4_cleanup_srq_table(dev);
2598                 mlx4_cleanup_cq_table(dev);
2599                 mlx4_cmd_use_polling(dev);
2600                 mlx4_cleanup_eq_table(dev);
2601                 mlx4_cleanup_mr_table(dev);
2602                 mlx4_cleanup_xrcd_table(dev);
2603                 mlx4_cleanup_pd_table(dev);
2604
2605                 if (mlx4_is_master(dev))
2606                         mlx4_free_resource_tracker(dev,
2607                                                    RES_TR_FREE_STRUCTS_ONLY);
2608
2609                 iounmap(priv->kar);
2610                 mlx4_uar_free(dev, &priv->driver_uar);
2611                 mlx4_cleanup_uar_table(dev);
2612                 if (!mlx4_is_slave(dev))
2613                         mlx4_clear_steering(dev);
2614                 mlx4_free_eq_table(dev);
2615                 if (mlx4_is_master(dev))
2616                         mlx4_multi_func_cleanup(dev);
2617                 mlx4_close_hca(dev);
2618                 if (mlx4_is_slave(dev))
2619                         mlx4_multi_func_cleanup(dev);
2620                 mlx4_cmd_cleanup(dev);
2621
2622                 if (dev->flags & MLX4_FLAG_MSI_X)
2623                         pci_disable_msix(pdev);
2624                 if (dev->flags & MLX4_FLAG_SRIOV) {
2625                         mlx4_warn(dev, "Disabling SR-IOV\n");
2626                         pci_disable_sriov(pdev);
2627                 }
2628
2629                 if (!mlx4_is_slave(dev))
2630                         mlx4_free_ownership(dev);
2631
2632                 kfree(dev->caps.qp0_tunnel);
2633                 kfree(dev->caps.qp0_proxy);
2634                 kfree(dev->caps.qp1_tunnel);
2635                 kfree(dev->caps.qp1_proxy);
2636
2637                 kfree(priv);
2638                 pci_release_regions(pdev);
2639                 pci_disable_device(pdev);
2640                 pci_set_drvdata(pdev, NULL);
2641         }
2642 }
2643
2644 int mlx4_restart_one(struct pci_dev *pdev)
2645 {
2646         struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
2647         struct mlx4_priv *priv = mlx4_priv(dev);
2648         int               pci_dev_data;
2649
2650         pci_dev_data = priv->pci_dev_data;
2651         mlx4_remove_one(pdev);
2652         return __mlx4_init_one(pdev, pci_dev_data);
2653 }
2654
2655 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
2656         /* MT25408 "Hermon" SDR */
2657         { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2658         /* MT25408 "Hermon" DDR */
2659         { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2660         /* MT25408 "Hermon" QDR */
2661         { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2662         /* MT25408 "Hermon" DDR PCIe gen2 */
2663         { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2664         /* MT25408 "Hermon" QDR PCIe gen2 */
2665         { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2666         /* MT25408 "Hermon" EN 10GigE */
2667         { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2668         /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
2669         { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2670         /* MT25458 ConnectX EN 10GBASE-T 10GigE */
2671         { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2672         /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
2673         { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2674         /* MT26468 ConnectX EN 10GigE PCIe gen2*/
2675         { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2676         /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
2677         { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2678         /* MT26478 ConnectX2 40GigE PCIe gen2 */
2679         { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
2680         /* MT25400 Family [ConnectX-2 Virtual Function] */
2681         { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
2682         /* MT27500 Family [ConnectX-3] */
2683         { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
2684         /* MT27500 Family [ConnectX-3 Virtual Function] */
2685         { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
2686         { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
2687         { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
2688         { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
2689         { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
2690         { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
2691         { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
2692         { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
2693         { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
2694         { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
2695         { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
2696         { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
2697         { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
2698         { 0, }
2699 };
2700
2701 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
2702
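/*
 * PCI error-recovery callbacks: on a channel error the device is torn
 * down immediately; a permanent failure reports DISCONNECT, anything
 * else requests a slot reset, after which mlx4_pci_slot_reset()
 * re-probes the device from scratch.
 */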
2703 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2704                                               pci_channel_state_t state)
2705 {
2706         mlx4_remove_one(pdev);
2707
2708         return state == pci_channel_io_perm_failure ?
2709                 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
2710 }
2711
2712 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
2713 {
2714         int ret = __mlx4_init_one(pdev, 0);
2715
2716         return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
2717 }
2718
2719 static const struct pci_error_handlers mlx4_err_handler = {
2720         .error_detected = mlx4_pci_err_detected,
2721         .slot_reset     = mlx4_pci_slot_reset,
2722 };
2723
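/*
 * Power-management hooks: suspend simply detaches the device and
 * resume runs the full init path again, so no device state is carried
 * across the transition.
 */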
static int suspend(struct pci_dev *pdev, pm_message_t state)
{
        mlx4_remove_one(pdev);

        return 0;
}
2740
2741 static int resume(struct pci_dev *pdev)
2742 {
2743         return __mlx4_init_one(pdev, 0);
2744 }
2745
static struct pci_driver mlx4_driver = {
        .name           = DRV_NAME,
        .id_table       = (struct pci_device_id *)mlx4_pci_table,
        .probe          = mlx4_init_one,
        .remove         = __devexit_p(mlx4_remove_one),
        .suspend        = suspend,
        .resume         = resume,
        .err_handler    = (struct pci_error_handlers *)&mlx4_err_handler,
};
2755
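/*
 * Sanity-check module parameters at load time.  The num_* fields of
 * mod_param_profile are log2 values, e.g. num_qp between 18 and 23
 * allows for 2^18 to 2^23 QPs.
 */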
2756 static int __init mlx4_verify_params(void)
2757 {
2758         if ((log_num_mac < 0) || (log_num_mac > 7)) {
                pr_warning("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
2760                 return -1;
2761         }
2762
2763         if (log_num_vlan != 0)
2764                 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
2765                            MLX4_LOG_NUM_VLANS);
2766
2767         if (mlx4_set_4k_mtu != -1)
2768                 pr_warning("mlx4_core: set_4k_mtu - obsolete module param\n");
2769
2770         if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
2771                 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
2772                 return -1;
2773         }
2774
        /* Check that the port_type_array module parameter holds a legal combination */
2776         if (port_type_array[0] == false && port_type_array[1] == true) {
2777                 pr_warning("mlx4_core: module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
2778                 port_type_array[0] = true;
2779         }
2780
2781         if (mlx4_log_num_mgm_entry_size != -1 &&
2782             (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
2783              mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
2784                 pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
2785                            "in legal range (-1 or %d..%d)\n",
2786                            mlx4_log_num_mgm_entry_size,
2787                            MLX4_MIN_MGM_LOG_ENTRY_SIZE,
2788                            MLX4_MAX_MGM_LOG_ENTRY_SIZE);
2789                 return -1;
2790         }
2791
2792         if (mod_param_profile.num_qp < 18 || mod_param_profile.num_qp > 23) {
2793                 pr_warning("mlx4_core: bad log_num_qp: %d\n",
2794                            mod_param_profile.num_qp);
2795                 return -1;
2796         }
2797
2798         if (mod_param_profile.num_srq < 10) {
2799                 pr_warning("mlx4_core: too low log_num_srq: %d\n",
2800                            mod_param_profile.num_srq);
2801                 return -1;
2802         }
2803
2804         if (mod_param_profile.num_cq < 10) {
2805                 pr_warning("mlx4_core: too low log_num_cq: %d\n",
2806                            mod_param_profile.num_cq);
2807                 return -1;
2808         }
2809
2810         if (mod_param_profile.num_mpt < 10) {
2811                 pr_warning("mlx4_core: too low log_num_mpt: %d\n",
2812                            mod_param_profile.num_mpt);
2813                 return -1;
2814         }
2815
2816         if (mod_param_profile.num_mtt && mod_param_profile.num_mtt < 15) {
2817                 pr_warning("mlx4_core: too low log_num_mtt: %d\n",
2818                            mod_param_profile.num_mtt);
2819                 return -1;
2820         }
2821
2822         if (mod_param_profile.num_mtt > MLX4_MAX_LOG_NUM_MTT) {
2823                 pr_warning("mlx4_core: too high log_num_mtt: %d\n",
2824                            mod_param_profile.num_mtt);
2825                 return -1;
2826         }
2827         return 0;
2828 }
2829
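/*
 * Module load: validate parameters, initialize the catastrophic-error
 * handling, create the driver workqueue, optionally apply the CPU
 * tuning, and finally register the PCI driver so probing can start.
 */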
2830 static int __init mlx4_init(void)
2831 {
2832         int ret;
2833
2834         if (mlx4_verify_params())
2835                 return -EINVAL;
2836
2837         mlx4_catas_init();
2838
2839         mlx4_wq = create_singlethread_workqueue("mlx4");
2840         if (!mlx4_wq)
2841                 return -ENOMEM;
2842
2843         if (enable_sys_tune)
2844                 sys_tune_init();
2845
        ret = pci_register_driver(&mlx4_driver);
        if (ret < 0) {
                if (enable_sys_tune)
                        sys_tune_fini();
                destroy_workqueue(mlx4_wq);
        }

        return ret < 0 ? ret : 0;
2851 }
2852
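/* Module unload: release everything acquired in mlx4_init(). */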
2853 static void __exit mlx4_cleanup(void)
2854 {
2855         if (enable_sys_tune)
2856                 sys_tune_fini();
2857
2858         pci_unregister_driver(&mlx4_driver);
2859         destroy_workqueue(mlx4_wq);
2860 }
2861
2862 module_init_order(mlx4_init, SI_ORDER_MIDDLE);
2863 module_exit(mlx4_cleanup);
2864
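/*
 * FreeBSD module glue.  The Linux-compat MODULE_VERSION macro used
 * earlier must be undefined before <sys/module.h> is included so that
 * the native MODULE_VERSION() declaration below takes effect.
 */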
2865 #undef MODULE_VERSION
2866 #include <sys/module.h>
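/* No-op module event handler; the real work is done by mlx4_init()/mlx4_cleanup(). */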
2867 static int
2868 mlx4_evhand(module_t mod, int event, void *arg)
2869 {
2870         return (0);
2871 }
2872
2873 static moduledata_t mlx4_mod = {
2874         .name = "mlx4",
2875         .evhand = mlx4_evhand,
2876 };
2877 MODULE_VERSION(mlx4, 1);
2878 DECLARE_MODULE(mlx4, mlx4_mod, SI_SUB_SMP, SI_ORDER_ANY);