/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_lib/mlx5.h>
#include "mlx5_core.h"
static const char mlx5_version[] = "Mellanox Core driver "
        DRIVER_VERSION " (" DRIVER_RELDATE ")";
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
MODULE_VERSION(mlx5, 1);
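
/*
 * Global driver knobs live under the hw.mlx5 sysctl tree created below.
 * Entries declared with CTLFLAG_RWTUN can also be set as loader tunables
 * before the module attaches.
 */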
SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW, 0, "mlx5 hardware controls");

int mlx5_core_debug_mask;
SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN,
    &mlx5_core_debug_mask, 0,
    "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
#define MLX5_DEFAULT_PROF 2
static int mlx5_prof_sel = MLX5_DEFAULT_PROF;
SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN,
    &mlx5_prof_sel, 0,
    "profile selector. Valid range 0 - 2");
static int mlx5_fast_unload_enabled = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN,
    &mlx5_fast_unload_enabled, 0,
    "Set to enable fast unload. Clear to disable.");
#define NUMA_NO_NODE -1

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
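
/*
 * dev_list tracks every probed core device and intf_list every registered
 * client interface (upper-layer drivers such as the Ethernet and IB ULPs).
 * intf_mutex serializes additions and removals on both lists so the add()
 * and remove() callbacks never race with device attach or detach.
 */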
struct mlx5_device_context {
        struct list_head list;
        struct mlx5_interface *intf;

        MLX5_ATOMIC_REQ_MODE_BE = 0x0,
        MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,

static struct mlx5_profile profiles[] = {
                .mask = MLX5_PROF_MASK_QP_SIZE,
                .mask = MLX5_PROF_MASK_QP_SIZE |
                        MLX5_PROF_MASK_MR_CACHE,
                .mask = MLX5_PROF_MASK_QP_SIZE,
static int set_dma_caps(struct pci_dev *pdev)

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
                device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                        device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n");

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                        device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n");

        dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
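
/*
 * Read the MPEIN access register and return the PCI power budget and its
 * status field; these presumably feed the per-device power_value and
 * power_status sysctls registered in init_one() below.
 */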
int mlx5_pci_read_power_status(struct mlx5_core_dev *dev,
                               u16 *p_power, u8 *p_status)
        u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {};
        u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {};

        err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
            MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0);

        *p_status = MLX5_GET(mpein_reg, out, pwr_status);
        *p_power = MLX5_GET(mpein_reg, out, pci_power);

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
        struct pci_dev *pdev = dev->pdev;

        mutex_lock(&dev->pci_status_mutex);
        if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
                err = pci_enable_device(pdev);
                        dev->pci_status = MLX5_PCI_STATUS_ENABLED;
        mutex_unlock(&dev->pci_status_mutex);

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
        struct pci_dev *pdev = dev->pdev;

        mutex_lock(&dev->pci_status_mutex);
        if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
                pci_disable_device(pdev);
                dev->pci_status = MLX5_PCI_STATUS_DISABLED;
        mutex_unlock(&dev->pci_status_mutex);

static int request_bar(struct pci_dev *pdev)

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n");

        err = pci_request_regions(pdev, DRIVER_NAME);
                device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n");

static void release_bar(struct pci_dev *pdev)
        pci_release_regions(pdev);
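
/*
 * Size the MSI-X vector table: MLX5_EQ_VEC_COMP_BASE control vectors plus
 * one completion vector per port per online CPU, clamped to the device's
 * log_max_eq limit; the msix_eqvec tunable, when set, bounds the number of
 * completion vectors. The entry array is then handed to
 * pci_enable_msix_range().
 */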
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;
        int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int limit = dev->msix_eqvec;
        int nvec = MLX5_EQ_VEC_COMP_BASE;

                nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();

        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_EQ_VEC_COMP_BASE)

        priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);

        priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL);

        for (i = 0; i < nvec; i++)
                priv->msix_arr[i].entry = i;

        nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
            MLX5_EQ_VEC_COMP_BASE + 1, nvec);

        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
        struct mlx5_priv *priv = &dev->priv;

        pci_disable_msix(dev->pdev);
        kfree(priv->irq_info);
        kfree(priv->msix_arr);

struct mlx5_reg_host_endianess {

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

        MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
                                MLX5_DEV_CAP_FLAG_DCT |
                                MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,

static u16 to_fw_pkey_sz(u32 size)
                printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size);
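
/*
 * Query one HCA capability group from firmware. The low bit of the opmod
 * selects between the current and the maximum supported values, and the
 * result is cached in dev->hca_caps_cur[] or dev->hca_caps_max[] for later
 * use by the capability handlers below.
 */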
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
                                   enum mlx5_cap_type cap_type,
                                   enum mlx5_cap_mode cap_mode)
        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        void *out, *hca_caps;
        u16 opmod = (cap_type << 1) | (cap_mode & 0x01);

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);

        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
                    "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
                    cap_type, cap_mode, err);

        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

        case HCA_CAP_OPMOD_GET_MAX:
                memcpy(dev->hca_caps_max[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
        case HCA_CAP_OPMOD_GET_CUR:
                memcpy(dev->hca_caps_cur[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                    "Tried to query dev cap type(%x) with wrong opmode(%x)\n",

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)

        ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);

        return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
        u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

        MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);

        return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
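
/*
 * Copy the current general caps into a SET_HCA_CAP mailbox, override the
 * fields the driver cares about (pkey table size, log_max_qp from the
 * selected profile, cmdif checksum off, drain_sigerr on, UAR page size)
 * and write the result back to firmware.
 */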
static int handle_hca_cap(struct mlx5_core_dev *dev)
        void *set_ctx = NULL;
        struct mlx5_profile *prof = dev->profile;
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);

        set_ctx = kzalloc(set_sz, GFP_KERNEL);

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
        memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
               MLX5_ST_SZ_BYTES(cmd_hca_cap));

        mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
                      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),

        /* we limit the size of the pkey table to 128 entries for now */
        MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,

        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
                MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,

        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

        /* enable drain sigerr */
        MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

        MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

        err = set_caps(dev, set_ctx, set_sz);
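
/*
 * When the device supports atomics and firmware advertises host-endianness
 * mode for 8-byte atomic requesters, switch the requester endianness mode
 * via SET_HCA_CAP; otherwise leave the default big-endian mode alone.
 */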
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)

        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);

        if (MLX5_CAP_GEN(dev, atomic)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
                    supported_atomic_req_8B_endianess_mode_1);

        if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)

        set_ctx = kzalloc(set_sz, GFP_KERNEL);

        MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
            MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

        /* Set requestor to host endianness */
        MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
            MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

        err = set_caps(dev, set_ctx, set_sz);

static int set_hca_ctrl(struct mlx5_core_dev *dev)
        struct mlx5_reg_host_endianess he_in;
        struct mlx5_reg_host_endianess he_out;

        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
            !MLX5_CAP_GEN(dev, roce))

        memset(&he_in, 0, sizeof(he_in));
        he_in.he = MLX5_SET_HOST_ENDIANNESS;
        err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
                                   &he_out, sizeof(he_out),
                                   MLX5_REG_HOST_ENDIANNESS, 0, 1);

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
        u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

        MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
        return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
        u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

        MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
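
/*
 * Negotiate the ISSI (interface step sequence ID) with firmware: query the
 * supported mask and move to ISSI 1 when bit 1 is set. Firmware that answers
 * QUERY_ISSI with BAD_OP only speaks ISSI 0, which is also accepted.
 */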
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
        u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
        u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};

        MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

        err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out));

                mlx5_cmd_mbox_status(query_out, &status, &syndrome);
                if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
                        pr_debug("Only ISSI 0 is supported\n");

                printf("mlx5_core: ERR: ""failed to query ISSI\n");

        sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

        if (sup_issi & (1 << 1)) {
                u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
                u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

                MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
                MLX5_SET(set_issi_in, set_in, current_issi, 1);

                err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));
                        printf("mlx5_core: ERR: ""failed to set ISSI=1 err(%d)\n", err);

        } else if (sup_issi & (1 << 0)) {

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
        struct mlx5_eq_table *table = &dev->priv.eq_table;

        spin_lock(&table->lock);
        list_for_each_entry(eq, &table->comp_eqs_list, list) {
                if (eq->index == vector) {

        spin_unlock(&table->lock);

EXPORT_SYMBOL(mlx5_vector2eqn);

int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name)
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;

        spin_lock(&table->lock);
        list_for_each_entry(eq, &table->comp_eqs_list, list) {
                if (eq->index == eq_ix) {
                        int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE;

                        snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME,
                            "%s-%d", name, eq_ix);

        spin_unlock(&table->lock);

static void free_comp_eqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_eq *eq, *n;

        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                spin_unlock(&table->lock);
                if (mlx5_destroy_unmap_eq(dev, eq))
                        mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
                spin_lock(&table->lock);
        spin_unlock(&table->lock);
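
/*
 * Create one completion EQ per MSI-X completion vector, name it
 * "mlx5_comp<n>" and queue it on comp_eqs_list under the table lock.
 */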
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        char name[MLX5_MAX_IRQ_NAME];

        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_vec = table->num_comp_vectors;
        nent = MLX5_COMP_EQ_SIZE;
        for (i = 0; i < ncomp_vec; i++) {
                eq = kzalloc(sizeof(*eq), GFP_KERNEL);

                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(dev, eq,
                    i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
                    name, &dev->priv.uuari.uars[0]);

                mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);

                spin_lock(&table->lock);
                list_add_tail(&eq->list, &table->comp_eqs_list);
                spin_unlock(&table->lock);

static int map_bf_area(struct mlx5_core_dev *dev)
        resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
        resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

        dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

        return dev->priv.bf_mapping ? 0 : -ENOMEM;

static void unmap_bf_area(struct mlx5_core_dev *dev)
        if (dev->priv.bf_mapping)
                io_mapping_free(dev->priv.bf_mapping);

static inline int fw_initializing(struct mlx5_core_dev *dev)
        return ioread32be(&dev->iseg->initializing) >> 31;

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
        u64 end = jiffies + msecs_to_jiffies(max_wait_mili);

        while (fw_initializing(dev)) {
                if (time_after(jiffies, end)) {

                msleep(FW_INIT_WAIT_MS);
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);

        dev_ctx->intf = intf;
        CURVNET_SET_QUIET(vnet0);
        dev_ctx->context = intf->add(dev);

        if (dev_ctx->context) {
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
                spin_unlock_irq(&priv->ctx_lock);

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf == intf) {
                        spin_lock_irq(&priv->ctx_lock);
                        list_del(&dev_ctx->list);
                        spin_unlock_irq(&priv->ctx_lock);

                        intf->remove(dev, dev_ctx->context);

mlx5_register_device(struct mlx5_core_dev *dev)
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&intf_mutex);
        list_add_tail(&priv->dev_list, &dev_list);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&intf_mutex);

mlx5_unregister_device(struct mlx5_core_dev *dev)
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_remove_device(intf, priv);
        list_del(&priv->dev_list);
        mutex_unlock(&intf_mutex);

int mlx5_register_interface(struct mlx5_interface *intf)
        struct mlx5_priv *priv;

        if (!intf->add || !intf->remove)

        mutex_lock(&intf_mutex);
        list_add_tail(&intf->list, &intf_list);
        list_for_each_entry(priv, &dev_list, dev_list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&intf_mutex);

EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
        struct mlx5_priv *priv;

        mutex_lock(&intf_mutex);
        list_for_each_entry(priv, &dev_list, dev_list)
                mlx5_remove_device(intf, priv);
        list_del(&intf->list);
        mutex_unlock(&intf_mutex);

EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
        struct mlx5_priv *priv = &mdev->priv;
        struct mlx5_device_context *dev_ctx;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
                if ((dev_ctx->intf->protocol == protocol) &&
                    dev_ctx->intf->get_dev) {
                        result = dev_ctx->intf->get_dev(dev_ctx->context);

        spin_unlock_irqrestore(&priv->ctx_lock, flags);

EXPORT_SYMBOL(mlx5_get_protocol_dev);

static int mlx5_auto_fw_update;
SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &mlx5_auto_fw_update, 0,
    "Allow automatic firmware update on driver start");
mlx5_firmware_update(struct mlx5_core_dev *dev)
        const struct firmware *fw;

        TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update);
        if (!mlx5_auto_fw_update)

        fw = firmware_get("mlx5fw_mfa");
                err = mlx5_firmware_flash(dev, fw);
                firmware_put(fw, FIRMWARE_UNLOAD);

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        struct pci_dev *pdev = dev->pdev;

        pci_set_drvdata(dev->pdev, dev);
        strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
        priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

        mutex_init(&priv->pgdir_mutex);
        INIT_LIST_HEAD(&priv->pgdir_list);
        spin_lock_init(&priv->mkey_lock);

        priv->numa_node = NUMA_NO_NODE;

        err = mlx5_pci_enable_device(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");

        err = request_bar(pdev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n");

        pci_set_master(pdev);

        err = set_dma_caps(pdev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n");

        dev->iseg_base = pci_resource_start(dev->pdev, 0);
        dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
                device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");

        release_bar(dev->pdev);

        mlx5_pci_disable_device(dev);

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        release_bar(dev->pdev);
        mlx5_pci_disable_device(dev);
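
/*
 * One-time software initialization done on first load: locate the vendor
 * specific capability, cache the HCA caps and the board id, then set up
 * the EQ, CQ, QP, SRQ and MR tables, reserved GIDs and the rate-limit
 * table. mlx5_cleanup_once() undoes this work in reverse order.
 */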
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        struct pci_dev *pdev = dev->pdev;

        err = mlx5_vsc_find_cap(dev);
                dev_err(&pdev->dev, "Unable to find vendor specific capabilities\n");

        err = mlx5_query_hca_caps(dev);
                dev_err(&pdev->dev, "query hca failed\n");

        err = mlx5_query_board_id(dev);
                dev_err(&pdev->dev, "query board id failed\n");

        err = mlx5_eq_init(dev);
                dev_err(&pdev->dev, "failed to initialize eq\n");

        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

        err = mlx5_init_cq_table(dev);
                dev_err(&pdev->dev, "failed to initialize cq table\n");

        mlx5_init_qp_table(dev);
        mlx5_init_srq_table(dev);
        mlx5_init_mr_table(dev);

        mlx5_init_reserved_gids(dev);

        err = mlx5_init_rl_table(dev);
                dev_err(&pdev->dev, "Failed to init rate limiting\n");
                goto err_tables_cleanup;

        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);

        mlx5_eq_cleanup(dev);

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
        mlx5_cleanup_rl_table(dev);

        mlx5_fpga_cleanup(dev);
        mlx5_cleanup_reserved_gids(dev);
        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        mlx5_eq_cleanup(dev);
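
/*
 * Bring the device up: initialize the command interface, wait for firmware,
 * enable the HCA, negotiate ISSI, hand out boot/init pages, apply the HCA
 * capabilities, run INIT_HCA and start health polling, then set up MSI-X,
 * UARs, EQs, flow steering and the FPGA before registering the device with
 * the client interfaces. The unwind path below tears these down in reverse
 * order on error.
 */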
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,

        struct pci_dev *pdev = dev->pdev;

        mutex_lock(&dev->intf_state_mutex);
        if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",

        device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
        /* On load, clear any previous indication of internal error; the device is up. */
        dev->state = MLX5_DEVICE_STATE_UP;

        err = mlx5_cmd_init(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");

        err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
                device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
                goto err_cmd_cleanup;

        err = mlx5_core_enable_hca(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n");
                goto err_cmd_cleanup;

        err = mlx5_core_set_issi(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
                goto err_disable_hca;

        err = mlx5_pagealloc_start(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
                goto err_disable_hca;

        err = mlx5_satisfy_startup_pages(dev, 1);
                device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
                goto err_pagealloc_stop;

        err = set_hca_ctrl(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
                goto reclaim_boot_pages;

        err = handle_hca_cap(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
                goto reclaim_boot_pages;

        err = handle_hca_cap_atomic(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap_atomic failed\n");
                goto reclaim_boot_pages;

        err = mlx5_satisfy_startup_pages(dev, 0);
                device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
                goto reclaim_boot_pages;

        err = mlx5_cmd_init_hca(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
                goto reclaim_boot_pages;

        mlx5_start_health_poll(dev);

        if (boot && mlx5_init_once(dev, priv)) {
                dev_err(&pdev->dev, "sw objs init failed\n");

        err = mlx5_enable_msix(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
                goto err_cleanup_once;

        err = mlx5_alloc_uuars(dev, &priv->uuari);
                device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
                goto err_disable_msix;

        err = mlx5_start_eqs(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");

        err = alloc_comp_eqs(dev);
                device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");

        if (map_bf_area(dev))
                device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");

        err = mlx5_init_fs(dev);
                mlx5_core_err(dev, "flow steering init %d\n", err);
                goto err_free_comp_eqs;

        err = mlx5_fpga_device_start(dev);
                dev_err(&pdev->dev, "fpga device start failed %d\n", err);
                goto err_fpga_start;

        err = mlx5_register_device(dev);
                dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);

        set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

        mutex_unlock(&dev->intf_state_mutex);

        mlx5_cleanup_fs(dev);

        mlx5_free_uuars(dev, &priv->uuari);

        mlx5_disable_msix(dev);

        mlx5_cleanup_once(dev);

        mlx5_stop_health_poll(dev, boot);
        if (mlx5_cmd_teardown_hca(dev)) {
                device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");

        mlx5_reclaim_startup_pages(dev);

        mlx5_pagealloc_stop(dev);

        mlx5_core_disable_hca(dev);

        mlx5_cmd_cleanup(dev);

        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        mutex_unlock(&dev->intf_state_mutex);
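
/*
 * Mirror image of mlx5_load_one(): unregister the client interfaces, stop
 * the FPGA and flow steering, free UARs and MSI-X vectors, tear down the
 * HCA and reclaim its pages. The one-time software state is also freed
 * when a full cleanup is requested.
 */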
static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,

                mlx5_drain_health_recovery(dev);

        mutex_lock(&dev->intf_state_mutex);
        if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", __func__);
                        mlx5_cleanup_once(dev);

        mlx5_unregister_device(dev);

        mlx5_fpga_device_stop(dev);
        mlx5_cleanup_fs(dev);

        mlx5_wait_for_reclaim_vfs_pages(dev);

        mlx5_free_uuars(dev, &priv->uuari);
        mlx5_disable_msix(dev);

                mlx5_cleanup_once(dev);
        mlx5_stop_health_poll(dev, cleanup);
        err = mlx5_cmd_teardown_hca(dev);
                device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");

        mlx5_pagealloc_stop(dev);
        mlx5_reclaim_startup_pages(dev);
        mlx5_core_disable_hca(dev);
        mlx5_cmd_cleanup(dev);

        clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
        mutex_unlock(&dev->intf_state_mutex);

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                     unsigned long param)
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf->event)
                        dev_ctx->intf->event(dev, dev_ctx->context, event, param);

        spin_unlock_irqrestore(&priv->ctx_lock, flags);
struct mlx5_core_event_handler {
        void (*event)(struct mlx5_core_dev *dev,
                      enum mlx5_dev_event event,

static int init_one(struct pci_dev *pdev,
                    const struct pci_device_id *id)
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;
        device_t bsddev = pdev->dev.bsddev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        priv->pci_dev_data = id->driver_data;

        if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) {
                device_printf(bsddev, "WARN: selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
                mlx5_prof_sel = MLX5_DEFAULT_PROF;

        dev->profile = &profiles[mlx5_prof_sel];

        dev->event = mlx5_core_event;

        device_set_desc(bsddev, mlx5_version);

        sysctl_ctx_init(&dev->sysctl_ctx);
        SYSCTL_ADD_INT(&dev->sysctl_ctx,
            SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
            OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
            "Maximum number of MSIX event queue vectors, if set");
        SYSCTL_ADD_INT(&dev->sysctl_ctx,
            SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
            OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0,
            "0:Invalid 1:Sufficient 2:Insufficient");
        SYSCTL_ADD_INT(&dev->sysctl_ctx,
            SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
            OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0,
            "Current power value in Watts");

        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
        mutex_init(&dev->pci_status_mutex);
        mutex_init(&dev->intf_state_mutex);
        err = mlx5_pci_init(dev, priv);
                device_printf(bsddev, "ERR: mlx5_pci_init failed %d\n", err);

        err = mlx5_health_init(dev);
                device_printf(bsddev, "ERR: mlx5_health_init failed %d\n", err);

        mlx5_pagealloc_init(dev);

        err = mlx5_load_one(dev, priv, true);
                device_printf(bsddev, "ERR: mlx5_load_one failed %d\n", err);

        mlx5_fwdump_prep(dev);

        mlx5_firmware_update(dev);

        pci_save_state(bsddev);

        mlx5_pagealloc_cleanup(dev);
        mlx5_health_cleanup(dev);

        mlx5_pci_close(dev, priv);

        sysctl_ctx_free(&dev->sysctl_ctx);

static void remove_one(struct pci_dev *pdev)
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_priv *priv = &dev->priv;

        if (mlx5_unload_one(dev, priv, true)) {
                dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
                mlx5_health_cleanup(dev);

        mlx5_fwdump_clean(dev);
        mlx5_pagealloc_cleanup(dev);
        mlx5_health_cleanup(dev);
        mlx5_pci_close(dev, priv);
        pci_set_drvdata(pdev, NULL);
        sysctl_ctx_free(&dev->sysctl_ctx);
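
/*
 * PCI error handlers: err_detected() moves the device into the error state
 * and unloads it, slot_reset() re-enables the function and restores PCI
 * config space, and resume() reloads the driver state.
 */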
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_priv *priv = &dev->priv;

        dev_info(&pdev->dev, "%s was called\n", __func__);
        mlx5_enter_error_state(dev, false);
        mlx5_unload_one(dev, priv, false);

        mlx5_drain_health_wq(dev);
        mlx5_pci_disable_device(dev);

        return state == pci_channel_io_perm_failure ?
            PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

        dev_info(&pdev->dev, "%s was called\n", __func__);

        err = mlx5_pci_enable_device(dev);
                dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
        pci_restore_state(pdev->dev.bsddev);
        pci_save_state(pdev->dev.bsddev);

        return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
/* Wait for the device to show vital signs. For now we check that we can
 * read the device ID and that the health buffer shows a non-zero value
 * which is different from 0xffffffff.
 */
static void wait_vital(struct pci_dev *pdev)
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_core_health *health = &dev->priv.health;
        const int niter = 100;

        /* Wait for firmware to be ready after reset */

        for (i = 0; i < niter; i++) {
                if (pci_read_config_word(pdev, 2, &did)) {
                        dev_warn(&pdev->dev, "failed reading config word\n");

                if (did == pdev->device) {
                        dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);

        dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);

        for (i = 0; i < niter; i++) {
                count = ioread32be(health->health_counter);
                if (count && count != 0xffffffff) {
                        dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
        dev_warn(&pdev->dev, "%s-%d: could not read health counter\n", __func__, __LINE__);
static void mlx5_pci_resume(struct pci_dev *pdev)
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_priv *priv = &dev->priv;

        dev_info(&pdev->dev, "%s was called\n", __func__);

        err = mlx5_load_one(dev, priv, false);
                dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"

        dev_info(&pdev->dev, "%s: device recovered\n", __func__);

static const struct pci_error_handlers mlx5_err_handler = {
        .error_detected = mlx5_pci_err_detected,
        .slot_reset = mlx5_pci_slot_reset,
        .resume = mlx5_pci_resume
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
        bool fast_teardown, force_teardown;

        if (!mlx5_fast_unload_enabled) {
                mlx5_core_dbg(dev, "fast unload is disabled by user\n");

        fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
        force_teardown = MLX5_CAP_GEN(dev, force_teardown);

        mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
        mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

        if (!fast_teardown && !force_teardown)

        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
        /* Panic tear down fw command will stop the PCI bus communication
         * with the HCA, so the health poll is no longer needed.
         */
        mlx5_drain_health_wq(dev);
        mlx5_stop_health_poll(dev, false);

        err = mlx5_cmd_fast_teardown_hca(dev);

                err = mlx5_cmd_force_teardown_hca(dev);

                mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
                mlx5_start_health_poll(dev);

        mlx5_enter_error_state(dev, true);
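
/*
 * Ask the interrupt handlers to stop doing work and wait, via
 * synchronize_irq(), until every MSI-X vector has finished running.
 * Used by shutdown_one() before tearing the device down.
 */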
static void mlx5_disable_interrupts(struct mlx5_core_dev *mdev)
        int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;

        mdev->priv.disable_irqs = 1;

        /* wait for all IRQ handlers to finish processing */
        for (x = 0; x != nvec; x++)
                synchronize_irq(mdev->priv.msix_arr[x].vector);

static void shutdown_one(struct pci_dev *pdev)
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_priv *priv = &dev->priv;

        /* enter polling mode */
        mlx5_cmd_use_polling(dev);

        /* disable all interrupts */
        mlx5_disable_interrupts(dev);

        err = mlx5_try_fast_unload(dev);
                mlx5_unload_one(dev, priv, false);
        mlx5_pci_disable_device(dev);
static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
        { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
        { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
        { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
        { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
        { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
        { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
        { PCI_VDEVICE(MELLANOX, 4121) },
        { PCI_VDEVICE(MELLANOX, 4122) },
        { PCI_VDEVICE(MELLANOX, 4123) },
        { PCI_VDEVICE(MELLANOX, 4124) },
        { PCI_VDEVICE(MELLANOX, 4125) },
        { PCI_VDEVICE(MELLANOX, 4126) },
        { PCI_VDEVICE(MELLANOX, 4127) },
        { PCI_VDEVICE(MELLANOX, 4128) },
        { PCI_VDEVICE(MELLANOX, 4129) },
        { PCI_VDEVICE(MELLANOX, 4130) },
        { PCI_VDEVICE(MELLANOX, 4131) },
        { PCI_VDEVICE(MELLANOX, 4132) },
        { PCI_VDEVICE(MELLANOX, 4133) },
        { PCI_VDEVICE(MELLANOX, 4134) },
        { PCI_VDEVICE(MELLANOX, 4135) },
        { PCI_VDEVICE(MELLANOX, 4136) },
        { PCI_VDEVICE(MELLANOX, 4137) },
        { PCI_VDEVICE(MELLANOX, 4138) },
        { PCI_VDEVICE(MELLANOX, 4139) },
        { PCI_VDEVICE(MELLANOX, 4140) },
        { PCI_VDEVICE(MELLANOX, 4141) },
        { PCI_VDEVICE(MELLANOX, 4142) },
        { PCI_VDEVICE(MELLANOX, 4143) },
        { PCI_VDEVICE(MELLANOX, 4144) },

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
        mlx5_pci_err_detected(dev->pdev, 0);

void mlx5_recover_device(struct mlx5_core_dev *dev)
        mlx5_pci_disable_device(dev);
        if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
                mlx5_pci_resume(dev->pdev);

struct pci_driver mlx5_core_driver = {
        .name = DRIVER_NAME,
        .id_table = mlx5_core_pci_table,
        .shutdown = shutdown_one,
        .remove = remove_one,
        .err_handler = &mlx5_err_handler

static int __init init(void)

        err = pci_register_driver(&mlx5_core_driver);

        err = mlx5_fwdump_init();

        pci_unregister_driver(&mlx5_core_driver);

static void __exit cleanup(void)
        pci_unregister_driver(&mlx5_core_driver);

module_exit(cleanup);