 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <dev/mlx5/mpfs.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_lib/mlx5.h>
#include "mlx5_core.h"

static const char mlx5_version[] = "Mellanox Core driver "
	DRIVER_VERSION " (" DRIVER_RELDATE ")";
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
MODULE_VERSION(mlx5, 1);

SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW, 0, "mlx5 hardware controls");

int mlx5_core_debug_mask;
SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN,
    &mlx5_core_debug_mask, 0,
    "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF 2
static int mlx5_prof_sel = MLX5_DEFAULT_PROF;
SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN,
    &mlx5_prof_sel, 0,
    "profile selector. Valid range 0 - 2");

static int mlx5_fast_unload_enabled = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN,
    &mlx5_fast_unload_enabled, 0,
    "Set to enable fast unload. Clear to disable.");

#define NUMA_NO_NODE -1

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head list;
	struct mlx5_interface *intf;

	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,

static struct mlx5_profile profiles[] = {
		.mask = MLX5_PROF_MASK_QP_SIZE,
		.mask = MLX5_PROF_MASK_QP_SIZE |
		    MLX5_PROF_MASK_MR_CACHE,
		.mask = MLX5_PROF_MASK_QP_SIZE,

static int set_dma_caps(struct pci_dev *pdev)
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n");
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n");

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n");
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n");

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
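
/*
 * Read the current PCI power value and power status for this device
 * through the MPEIN access register.
 */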
int mlx5_pci_read_power_status(struct mlx5_core_dev *dev,
    u16 *p_power, u8 *p_status)
	u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {};

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
	    MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0);

	*p_status = MLX5_GET(mpein_reg, out, pwr_status);
	*p_power = MLX5_GET(mpein_reg, out, pci_power);

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	mutex_unlock(&dev->pci_status_mutex);

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	mutex_unlock(&dev->pci_status_mutex);

static int request_bar(struct pci_dev *pdev)
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		mlx5_core_err(dev, "Missing registers BAR, aborting\n");

	err = pci_request_regions(pdev, DRIVER_NAME);
	mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n");

static void release_bar(struct pci_dev *pdev)
	pci_release_regions(pdev);
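
/*
 * Allocate the MSI-X entry array and enable MSI-X: the number of vectors
 * scales with the port count and online CPUs on top of the control vectors,
 * may be limited by the per-device msix_eqvec tunable, and is capped at the
 * firmware API limit of 256.
 */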
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int limit = dev->msix_eqvec;
	int nvec = MLX5_EQ_VEC_COMP_BASE;

	nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();
	nvec = 256; /* limit of firmware API */
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)

	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
	    MLX5_EQ_VEC_COMP_BASE + 1, nvec);

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->msix_arr);

struct mlx5_reg_host_endianess {

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
	    MLX5_DEV_CAP_FLAG_DCT |
	    MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,

static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);

static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
    enum mlx5_cap_type cap_type,
    enum mlx5_cap_mode cap_mode)
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	    "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
	    cap_type, cap_mode, err);

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		    MLX5_UN_SZ_BYTES(hca_cap_union));
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		    MLX5_UN_SZ_BYTES(hca_cap_union));
	    "Tried to query dev cap type(%x) with wrong opmode(%x)\n",

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);

	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
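
/*
 * Query the current general HCA capabilities, apply the driver overrides
 * (pkey table size, profile log_max_qp, cmdif checksum off, drain_sigerr
 * on, UAR page size) and write the result back with SET_HCA_CAP.
 */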
static int handle_hca_cap(struct mlx5_core_dev *dev)
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);

	set_ctx = kzalloc(set_sz, GFP_KERNEL);

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	    MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
	    mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),

	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
	    to_fw_pkey_sz(dev, 128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* enable drain sigerr */
	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
	    supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)

	set_ctx = kzalloc(set_sz, GFP_KERNEL);

	MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
	    MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
	    MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz);
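
/*
 * Tell firmware which endianness the host uses by writing the
 * HOST_ENDIANNESS access register; Ethernet-only devices without RoCE
 * support are left at the default.
 */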
static int set_hca_ctrl(struct mlx5_core_dev *dev)
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    !MLX5_CAP_GEN(dev, roce))

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
	    &he_out, sizeof(he_out),
	    MLX5_REG_HOST_ENDIANNESS, 0, 1);

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));

static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out));

	mlx5_cmd_mbox_status(query_out, &status, &syndrome);
	if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
		mlx5_core_dbg(dev, "Only ISSI 0 is supported\n");

	mlx5_core_err(dev, "failed to query ISSI\n");

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));
		mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err);
	} else if (sup_issi & (1 << 0)) {
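
/* Translate a completion vector index into its EQ number and IRQ number. */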
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
	struct mlx5_eq_table *table = &dev->priv.eq_table;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
	spin_unlock(&table->lock);

EXPORT_SYMBOL(mlx5_vector2eqn);

static void free_comp_eqs(struct mlx5_core_dev *dev)
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
		spin_lock(&table->lock);
	spin_unlock(&table->lock);

static int alloc_comp_eqs(struct mlx5_core_dev *dev)
	struct mlx5_eq_table *table = &dev->priv.eq_table;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		err = mlx5_create_map_eq(dev, eq,
		    i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
		    &dev->priv.uuari.uars[0]);
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);

		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
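
/* Create a write-combining mapping of the BlueFlame registers in BAR 0. */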
static int map_bf_area(struct mlx5_core_dev *dev)
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;

static void unmap_bf_area(struct mlx5_core_dev *dev)
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);

static inline int fw_initializing(struct mlx5_core_dev *dev)
	return ioread32be(&dev->iseg->initializing) >> 31;

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
	int warn = jiffies + msecs_to_jiffies(warn_time_mili);
	int end = jiffies + msecs_to_jiffies(max_wait_mili);

	MPASS(max_wait_mili > warn_time_mili);

	while (fw_initializing(dev) == 1) {
		if (time_after(jiffies, end)) {
		if (warn_time_mili && time_after(jiffies, warn)) {
			    "Waiting for FW initialization, timeout abort in %u s\n",
			    (unsigned int)(jiffies_to_msecs(end - warn) / 1000));
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		msleep(FW_INIT_WAIT_MS);

	mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n",
	    ioread32be(&dev->iseg->initializing));
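
/*
 * Attach one registered interface (for example the Ethernet or Infiniband
 * client) to this core device and remember the context it returns.
 */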
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);

	dev_ctx->intf = intf;
	CURVNET_SET_QUIET(vnet0);
	dev_ctx->context = intf->add(dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);

mlx5_register_device(struct mlx5_core_dev *dev)
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

mlx5_unregister_device(struct mlx5_core_dev *dev)
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);

int mlx5_register_interface(struct mlx5_interface *intf)
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);

EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

EXPORT_SYMBOL(mlx5_get_protocol_dev);

static int mlx5_auto_fw_update;
SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &mlx5_auto_fw_update, 0,
    "Allow automatic firmware update on driver start");

mlx5_firmware_update(struct mlx5_core_dev *dev)
	const struct firmware *fw;

	TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update);
	if (!mlx5_auto_fw_update)
	fw = firmware_get("mlx5fw_mfa");

	err = mlx5_firmware_flash(dev, fw);
	firmware_put(fw, FIRMWARE_UNLOAD);
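
/*
 * Basic PCI bring-up: enable the device, request BAR 0, become bus master,
 * set the DMA masks and map the firmware initialization segment.
 */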
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
	struct pci_dev *pdev = dev->pdev;

	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->numa_node = NUMA_NO_NODE;

	err = mlx5_pci_enable_device(dev);
	mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");

	err = request_bar(pdev);
	mlx5_core_err(dev, "error requesting BARs, aborting\n");

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");

	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);

static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
	err = mlx5_vsc_find_cap(dev);
	mlx5_core_err(dev, "Unable to find vendor specific capabilities\n");

	err = mlx5_query_hca_caps(dev);
	mlx5_core_err(dev, "query hca failed\n");

	err = mlx5_query_board_id(dev);
	mlx5_core_err(dev, "query board id failed\n");

	err = mlx5_eq_init(dev);
	mlx5_core_err(dev, "failed to initialize eq\n");

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	err = mlx5_init_cq_table(dev);
	mlx5_core_err(dev, "failed to initialize cq table\n");

	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	mlx5_init_reserved_gids(dev);

	err = mlx5_init_rl_table(dev);
	mlx5_core_err(dev, "Failed to init rate limiting\n");
	goto err_tables_cleanup;

	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);

	mlx5_eq_cleanup(dev);

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
	mlx5_cleanup_rl_table(dev);
	mlx5_fpga_cleanup(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
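
/*
 * Bring the device up: wait for firmware, initialize the command interface,
 * enable the HCA, negotiate ISSI and capabilities, hand out boot/init pages,
 * start EQs, flow steering, MPFS and FPGA support, and finally register the
 * device with the upper-layer interfaces. Errors unwind in reverse order.
 */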
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");

	mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n",
	    fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	 * On load removing any previous indication of internal error,
	dev->state = MLX5_DEVICE_STATE_UP;

	/* wait for firmware to accept initialization segments configurations
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI,
	    FW_INIT_WARN_MESSAGE_INTERVAL);
		dev_err(&dev->pdev->dev,
		    "Firmware over %d MS in pre-initializing state, aborting\n",
		    FW_PRE_INIT_TIMEOUT_MILI);

	err = mlx5_cmd_init(dev);
	    "Failed initializing command interface, aborting\n");

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
	    "Firmware over %d MS in initializing state, aborting\n",
	    FW_INIT_TIMEOUT_MILI);
	goto err_cmd_cleanup;

	err = mlx5_core_enable_hca(dev);
	mlx5_core_err(dev, "enable hca failed\n");
	goto err_cmd_cleanup;

	err = mlx5_core_set_issi(dev);
	mlx5_core_err(dev, "failed to set issi\n");
	goto err_disable_hca;

	err = mlx5_pagealloc_start(dev);
	mlx5_core_err(dev, "mlx5_pagealloc_start failed\n");
	goto err_disable_hca;

	err = mlx5_satisfy_startup_pages(dev, 1);
	mlx5_core_err(dev, "failed to allocate boot pages\n");
	goto err_pagealloc_stop;

	err = set_hca_ctrl(dev);
	mlx5_core_err(dev, "set_hca_ctrl failed\n");
	goto reclaim_boot_pages;

	err = handle_hca_cap(dev);
	mlx5_core_err(dev, "handle_hca_cap failed\n");
	goto reclaim_boot_pages;

	err = handle_hca_cap_atomic(dev);
	mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
	goto reclaim_boot_pages;

	err = mlx5_satisfy_startup_pages(dev, 0);
	mlx5_core_err(dev, "failed to allocate init pages\n");
	goto reclaim_boot_pages;

	err = mlx5_cmd_init_hca(dev);
	mlx5_core_err(dev, "init hca failed\n");
	goto reclaim_boot_pages;

	mlx5_start_health_poll(dev);

	if (boot && mlx5_init_once(dev, priv)) {
		mlx5_core_err(dev, "sw objs init failed\n");

	err = mlx5_enable_msix(dev);
	mlx5_core_err(dev, "enable msix failed\n");
	goto err_cleanup_once;

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	mlx5_core_err(dev, "Failed allocating uar, aborting\n");
	goto err_disable_msix;

	err = mlx5_start_eqs(dev);
	mlx5_core_err(dev, "Failed to start pages and async EQs\n");

	err = alloc_comp_eqs(dev);
	mlx5_core_err(dev, "Failed to alloc completion EQs\n");

	if (map_bf_area(dev))
		mlx5_core_err(dev, "Failed to map blue flame area\n");

	err = mlx5_init_fs(dev);
	mlx5_core_err(dev, "flow steering init %d\n", err);
	goto err_free_comp_eqs;

	err = mlx5_mpfs_init(dev);
	mlx5_core_err(dev, "mpfs init failed %d\n", err);

	err = mlx5_fpga_device_start(dev);
	mlx5_core_err(dev, "fpga device start failed %d\n", err);

	err = mlx5_register_device(dev);
	mlx5_core_err(dev, "mlx5_register_device failed %d\n", err);

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	mutex_unlock(&dev->intf_state_mutex);

	mlx5_fpga_device_stop(dev);
	mlx5_mpfs_destroy(dev);
	mlx5_cleanup_fs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_disable_msix(dev);
	mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev, boot);
	if (mlx5_cmd_teardown_hca(dev)) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
	mlx5_reclaim_startup_pages(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_core_disable_hca(dev);
	mlx5_cmd_cleanup(dev);
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
	mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__);
		mlx5_cleanup_once(dev);

	mlx5_unregister_device(dev);

	mlx5_fpga_device_stop(dev);
	mlx5_mpfs_destroy(dev);
	mlx5_cleanup_fs(dev);
	mlx5_wait_for_reclaim_vfs_pages(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_disable_msix(dev);
	mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev, cleanup);
	err = mlx5_cmd_teardown_hca(dev);
	mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_cmd_cleanup(dev);

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mutex_unlock(&dev->intf_state_mutex);

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
    unsigned long param)
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
	    enum mlx5_dev_event event,

#define MLX5_STATS_DESC(a, b, c, d, e, ...) d, e,

#define MLX5_PORT_MODULE_ERROR_STATS(m) \
m(+1, u64, power_budget_exceeded, "power_budget", "Module Power Budget Exceeded") \
m(+1, u64, long_range, "long_range", "Module Long Range for non MLNX cable/module") \
m(+1, u64, bus_stuck, "bus_stuck", "Module Bus stuck(I2C or data shorted)") \
m(+1, u64, no_eeprom, "no_eeprom", "No EEPROM/retry timeout") \
m(+1, u64, enforce_part_number, "enforce_part_number", "Module Enforce part number list") \
m(+1, u64, unknown_id, "unknown_id", "Module Unknown identifier") \
m(+1, u64, high_temp, "high_temp", "Module High Temperature") \
m(+1, u64, cable_shorted, "cable_shorted", "Module Cable is shorted")

static const char *mlx5_pme_err_desc[] = {
	MLX5_PORT_MODULE_ERROR_STATS(MLX5_STATS_DESC)

static int init_one(struct pci_dev *pdev,
    const struct pci_device_id *id)
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	device_t bsddev = pdev->dev.bsddev;
	struct sysctl_oid *pme_sysctl_node;
	struct sysctl_oid *pme_err_sysctl_node;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	priv->pci_dev_data = id->driver_data;

	if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) {
		device_printf(bsddev,
		    "WARN: selected profile out of range, selecting default (%d)\n",
		mlx5_prof_sel = MLX5_DEFAULT_PROF;
	dev->profile = &profiles[mlx5_prof_sel];
	dev->event = mlx5_core_event;

	device_set_desc(bsddev, mlx5_version);

	sysctl_ctx_init(&dev->sysctl_ctx);
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
	    "Maximum number of MSIX event queue vectors, if set");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0,
	    "0:Invalid 1:Sufficient 2:Insufficient");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0,
	    "Current power value in Watts");

	pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "pme_stats", CTLFLAG_RD, NULL,
	    "Port module event statistics");
	if (pme_sysctl_node == NULL) {
		goto clean_sysctl_ctx;
	pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node),
	    OID_AUTO, "errors", CTLFLAG_RD, NULL,
	    "Port module event error statistics");
	if (pme_err_sysctl_node == NULL) {
		goto clean_sysctl_ctx;
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_plug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED],
	    0, "Number of times the module was plugged");
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_unplug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED],
	    0, "Number of times the module was unplugged");
	for (i = 0; i < MLX5_MODULE_EVENT_ERROR_NUM; i++) {
		SYSCTL_ADD_U64(&dev->sysctl_ctx,
		    SYSCTL_CHILDREN(pme_err_sysctl_node), OID_AUTO,
		    mlx5_pme_err_desc[2 * i], CTLFLAG_RD | CTLFLAG_MPSAFE,
		    &dev->priv.pme_stats.error_counters[i],
		    0, mlx5_pme_err_desc[2 * i + 1]);

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW);
	err = mlx5_pci_init(dev, priv);
	mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err);

	err = mlx5_health_init(dev);
	mlx5_core_err(dev, "mlx5_health_init failed %d\n", err);

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);

	mlx5_fwdump_prep(dev);

	mlx5_firmware_update(dev);

	pci_save_state(bsddev);

	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_pci_close(dev, priv);
	mtx_destroy(&dev->dump_lock);
	sysctl_ctx_free(&dev->sysctl_ctx);

static void remove_one(struct pci_dev *pdev)
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	if (mlx5_unload_one(dev, priv, true)) {
		mlx5_core_err(dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);

	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_fwdump_clean(dev);
	mlx5_pci_close(dev, priv);
	mtx_destroy(&dev->dump_lock);
	pci_set_drvdata(pdev, NULL);
	sysctl_ctx_free(&dev->sysctl_ctx);

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
    pci_channel_state_t state)
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	mlx5_core_info(dev, "%s was called\n", __func__);
	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, priv, false);

	mlx5_drain_health_wq(dev);
	mlx5_pci_disable_device(dev);

	return state == pci_channel_io_perm_failure ?
	    PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	mlx5_core_err(dev, "mlx5_pci_enable_device failed with error code: %d\n"
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
	pci_restore_state(pdev->dev.bsddev);
	pci_save_state(pdev->dev.bsddev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;

/* wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non-zero value which is different from 0xffffffff
static void wait_vital(struct pci_dev *pdev)
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;

	/* Wait for firmware to be ready after reset */
	for (i = 0; i < niter; i++) {
		if (pci_read_config_word(pdev, 2, &did)) {
			mlx5_core_warn(dev, "failed reading config word\n");
		if (did == pdev->device) {
			    "device ID correctly read after %d iterations\n", i);

	mlx5_core_warn(dev, "could not read device ID\n");

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			    "Counter value 0x%x after %d iterations\n", count, i);

	mlx5_core_warn(dev, "could not read the health counter\n");

static void mlx5_pci_resume(struct pci_dev *pdev)
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_load_one(dev, priv, false);
	    "mlx5_load_one failed with error code: %d\n", err);

	mlx5_core_info(dev, "device recovered\n");

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset = mlx5_pci_slot_reset,
	.resume = mlx5_pci_resume
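
/*
 * Try to shut the HCA down with the fast/force teardown commands instead of
 * the full unload path; falls back when firmware lacks support or the device
 * is already in an internal error state.
 */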
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
	bool fast_teardown, force_teardown;

	if (!mlx5_fast_unload_enabled) {
		mlx5_core_dbg(dev, "fast unload is disabled by user\n");

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	err = mlx5_cmd_fast_teardown_hca(dev);

	err = mlx5_cmd_force_teardown_hca(dev);

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
	mlx5_start_health_poll(dev);

	mlx5_enter_error_state(dev, true);

static void mlx5_disable_interrupts(struct mlx5_core_dev *mdev)
	int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;

	mdev->priv.disable_irqs = 1;

	/* wait for all IRQ handlers to finish processing */
	for (x = 0; x != nvec; x++)
		synchronize_irq(mdev->priv.msix_arr[x].vector);
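
/*
 * Shutdown hook: switch the command interface to polling, quiesce MSI-X
 * interrupts and tear the device down, preferring the fast unload path when
 * firmware supports it.
 */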
static void shutdown_one(struct pci_dev *pdev)
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	/* enter polling mode */
	mlx5_cmd_use_polling(dev);

	/* disable all interrupts */
	mlx5_disable_interrupts(dev);

	err = mlx5_try_fast_unload(dev);
	mlx5_unload_one(dev, priv, false);
	mlx5_pci_disable_device(dev);

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
	{ PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 4121) },
	{ PCI_VDEVICE(MELLANOX, 4122) },
	{ PCI_VDEVICE(MELLANOX, 4123) },
	{ PCI_VDEVICE(MELLANOX, 4124) },
	{ PCI_VDEVICE(MELLANOX, 4125) },
	{ PCI_VDEVICE(MELLANOX, 4126) },
	{ PCI_VDEVICE(MELLANOX, 4127) },
	{ PCI_VDEVICE(MELLANOX, 4128) },
	{ PCI_VDEVICE(MELLANOX, 4129) },
	{ PCI_VDEVICE(MELLANOX, 4130) },
	{ PCI_VDEVICE(MELLANOX, 4131) },
	{ PCI_VDEVICE(MELLANOX, 4132) },
	{ PCI_VDEVICE(MELLANOX, 4133) },
	{ PCI_VDEVICE(MELLANOX, 4134) },
	{ PCI_VDEVICE(MELLANOX, 4135) },
	{ PCI_VDEVICE(MELLANOX, 4136) },
	{ PCI_VDEVICE(MELLANOX, 4137) },
	{ PCI_VDEVICE(MELLANOX, 4138) },
	{ PCI_VDEVICE(MELLANOX, 4139) },
	{ PCI_VDEVICE(MELLANOX, 4140) },
	{ PCI_VDEVICE(MELLANOX, 4141) },
	{ PCI_VDEVICE(MELLANOX, 4142) },
	{ PCI_VDEVICE(MELLANOX, 4143) },
	{ PCI_VDEVICE(MELLANOX, 4144) },

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
	mlx5_pci_err_detected(dev->pdev, 0);

void mlx5_recover_device(struct mlx5_core_dev *dev)
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);

struct pci_driver mlx5_core_driver = {
	.name = DRIVER_NAME,
	.id_table = mlx5_core_pci_table,
	.shutdown = shutdown_one,
	.remove = remove_one,
	.err_handler = &mlx5_err_handler

static int __init init(void)
	err = pci_register_driver(&mlx5_core_driver);

	err = mlx5_ctl_init();

	pci_unregister_driver(&mlx5_core_driver);

static void __exit cleanup(void)
	pci_unregister_driver(&mlx5_core_driver);

module_exit(cleanup);