/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/vport.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_lib/mlx5.h>
#include "mlx5_core.h"
#include "fs_core.h"

#include <sys/nv.h>
#include <dev/pci/pci_iov.h>
#include <sys/iov_schema.h>
static const char mlx5_version[] = "Mellanox Core driver "
    DRIVER_VERSION " (" DRIVER_RELDATE ")";
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
MODULE_VERSION(mlx5, 1);
SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "mlx5 hardware controls");

int mlx5_core_debug_mask;
SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN,
    &mlx5_core_debug_mask, 0,
    "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int mlx5_prof_sel = MLX5_DEFAULT_PROF;
SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN,
    &mlx5_prof_sel, 0,
    "profile selector. Valid range 0 - 2");

static int mlx5_fast_unload_enabled = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN,
    &mlx5_fast_unload_enabled, 0,
    "Set to enable fast unload. Clear to disable.");
#define NUMA_NO_NODE	-1

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
struct mlx5_device_context {
    struct list_head list;
    struct mlx5_interface *intf;
    void *context;
};
enum {
    MLX5_ATOMIC_REQ_MODE_BE = 0x0,
    MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
static struct mlx5_profile profiles[] = {
    [0] = {
        .mask		= 0,
    },
    [1] = {
        .mask		= MLX5_PROF_MASK_QP_SIZE,
        .log_max_qp	= 12,
    },
    [2] = {
        .mask		= MLX5_PROF_MASK_QP_SIZE |
                          MLX5_PROF_MASK_MR_CACHE,
        .log_max_qp	= 17,
    },
    [3] = {
        .mask		= MLX5_PROF_MASK_QP_SIZE,
        .log_max_qp	= 17,
    },
};
#ifdef PCI_IOV
static const char iov_mac_addr_name[] = "mac-addr";
#endif
static int set_dma_caps(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    int err;

    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
    if (err) {
        mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n");
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
            mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n");
            return err;
        }
    }

    err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
    if (err) {
        mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n");
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
            mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n");
            return err;
        }
    }

    dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
    return err;
}
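
/*
 * Read the device's power budget and power status from the MPEIN
 * (management PCIe info) access register.
 */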
int mlx5_pci_read_power_status(struct mlx5_core_dev *dev,
			       u16 *p_power, u8 *p_status)
{
    u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {};
    u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {};
    int err;

    err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
        MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0);

    *p_status = MLX5_GET(mpein_reg, out, pwr_status);
    *p_power = MLX5_GET(mpein_reg, out, pci_power);

    return err;
}
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
    struct pci_dev *pdev = dev->pdev;
    int err = 0;

    mutex_lock(&dev->pci_status_mutex);
    if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
        err = pci_enable_device(pdev);
        if (!err)
            dev->pci_status = MLX5_PCI_STATUS_ENABLED;
    }
    mutex_unlock(&dev->pci_status_mutex);

    return err;
}
static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
    struct pci_dev *pdev = dev->pdev;

    mutex_lock(&dev->pci_status_mutex);
    if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
        pci_disable_device(pdev);
        dev->pci_status = MLX5_PCI_STATUS_DISABLED;
    }
    mutex_unlock(&dev->pci_status_mutex);
}
static int request_bar(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    int err = 0;

    if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
        mlx5_core_err(dev, "Missing registers BAR, aborting\n");
        return -ENODEV;
    }

    err = pci_request_regions(pdev, DRIVER_NAME);
    if (err)
        mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n");

    return err;
}

static void release_bar(struct pci_dev *pdev)
{
    pci_release_regions(pdev);
}
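
/*
 * Allocate and enable the MSI-X vector table: MLX5_EQ_VEC_COMP_BASE
 * vectors are reserved for the command, async and page-request EQs,
 * and one vector per completion EQ is added on top, bounded by the
 * hw.mlx5.msix_eqvec tunable, log_max_eq and the firmware limit of 256.
 */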
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
    struct mlx5_priv *priv = &dev->priv;
    struct mlx5_eq_table *table = &priv->eq_table;
    int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
    int limit = dev->msix_eqvec;
    int nvec = MLX5_EQ_VEC_COMP_BASE;
    int i;

    if (limit > 0)
        nvec += limit;
    else
        nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();

    nvec = min_t(int, nvec, num_eqs);
    if (nvec > 256)
        nvec = 256;	/* limit of firmware API */
    if (nvec <= MLX5_EQ_VEC_COMP_BASE)
        return -ENOMEM;

    priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);

    for (i = 0; i < nvec; i++)
        priv->msix_arr[i].entry = i;

    nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				 MLX5_EQ_VEC_COMP_BASE + 1, nvec);
    if (nvec < 0)
        return nvec;

    table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
    return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
    struct mlx5_priv *priv = &dev->priv;

    pci_disable_msix(dev->pdev);
    kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
    u8	he;
    u8	rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
    MLX5_CAP_BITS_RW_MASK	= CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				  MLX5_DEV_CAP_FLAG_DCT |
				  MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
    switch (size) {
    case 128:
        return 0;
    case 256:
        return 1;
    case 512:
        return 2;
    case 1024:
        return 3;
    case 2048:
        return 4;
    case 4096:
        return 5;
    default:
        mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
        return 0;
    }
}
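
/*
 * Fetch one capability group from firmware via QUERY_HCA_CAP; op_mod
 * selects both the capability type and whether the current or maximum
 * values are returned, which are cached in hca_caps_cur/hca_caps_max.
 */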
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
    u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
    int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
    void *out, *hca_caps;
    u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
    int err;

    memset(in, 0, sizeof(in));
    out = kzalloc(out_sz, GFP_KERNEL);

    MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
    MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
    err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
    if (err) {
        mlx5_core_warn(dev,
            "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
            cap_type, cap_mode, err);
        goto query_ex;
    }

    hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

    switch (cap_mode) {
    case HCA_CAP_OPMOD_GET_MAX:
        memcpy(dev->hca_caps_max[cap_type], hca_caps,
               MLX5_UN_SZ_BYTES(hca_cap_union));
        break;
    case HCA_CAP_OPMOD_GET_CUR:
        memcpy(dev->hca_caps_cur[cap_type], hca_caps,
               MLX5_UN_SZ_BYTES(hca_cap_union));
        break;
    default:
        mlx5_core_warn(dev,
            "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
            cap_type, cap_mode);
        err = -EINVAL;
        break;
    }

query_ex:
    kfree(out);
    return err;
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
    int ret;

    ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
    if (ret)
        return ret;

    return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
    u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

    MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);

    return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
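
/*
 * Negotiate the general HCA capabilities: start from the current caps,
 * clamp the pkey table, apply the profile's QP limit, disable command
 * interface checksums and set the UAR page size, then write the result
 * back with SET_HCA_CAP.
 */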
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
    void *set_ctx = NULL;
    struct mlx5_profile *prof = dev->profile;
    int err = -ENOMEM;
    int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
    void *set_hca_cap;

    set_ctx = kzalloc(set_sz, GFP_KERNEL);
    if (!set_ctx)
        goto query_ex;

    err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
    if (err)
        goto query_ex;

    set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
                               capability);
    memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
           MLX5_ST_SZ_BYTES(cmd_hca_cap));

    mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
        mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
        128);
    /* we limit the size of the pkey table to 128 entries for now */
    MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
             to_fw_pkey_sz(dev, 128));

    if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
        MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
                 prof->log_max_qp);

    /* disable cmdif checksum */
    MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

    /* enable drain sigerr */
    MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

    MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

    err = set_caps(dev, set_ctx, set_sz);

query_ex:
    kfree(set_ctx);
    return err;
}
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
    void *set_hca_cap;
    void *set_ctx;
    int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
    int req_endianness;
    int err;

    if (MLX5_CAP_GEN(dev, atomic)) {
        err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
        if (err)
            return err;
    } else {
        return 0;
    }

    req_endianness =
        MLX5_CAP_ATOMIC(dev,
                        supported_atomic_req_8B_endianess_mode_1);

    if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
        return 0;

    set_ctx = kzalloc(set_sz, GFP_KERNEL);
    if (!set_ctx)
        return -ENOMEM;

    MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
             MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
    set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

    /* Set requestor to host endianness */
    MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
             MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

    err = set_caps(dev, set_ctx, set_sz);

    kfree(set_ctx);
    return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
    struct mlx5_reg_host_endianess he_in;
    struct mlx5_reg_host_endianess he_out;
    int err;

    if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
        !MLX5_CAP_GEN(dev, roce))
        return 0;

    memset(&he_in, 0, sizeof(he_in));
    he_in.he = MLX5_SET_HOST_ENDIANNESS;
    err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
                               &he_out, sizeof(he_out),
                               MLX5_REG_HOST_ENDIANNESS, 0, 1);
    return err;
}
static int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
    u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
    u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

    MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
    MLX5_SET(enable_hca_in, in, function_id, func_id);
    return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
    u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
    u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

    MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
    return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
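
/*
 * Negotiate the ISSI (Interface Step Sequence ID) with the firmware:
 * query the supported mask, prefer ISSI 1 when offered, and fall back
 * to ISSI 0 on firmware that predates the QUERY_ISSI command.
 */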
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
    u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
    u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
    u32 sup_issi;
    int err;

    MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

    err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out));
    if (err) {
        u32 syndrome;
        u8 status;

        mlx5_cmd_mbox_status(query_out, &status, &syndrome);
        if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
            mlx5_core_dbg(dev, "Only ISSI 0 is supported\n");
            return 0;
        }

        mlx5_core_err(dev, "failed to query ISSI\n");
        return err;
    }

    sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

    if (sup_issi & (1 << 1)) {
        u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
        u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

        MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
        MLX5_SET(set_issi_in, set_in, current_issi, 1);

        err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));
        if (err) {
            mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err);
            return err;
        }

        dev->issi = 1;

        return 0;
    } else if (sup_issi & (1 << 0)) {
        return 0;
    }

    return -ENOTSUPP;
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
    struct mlx5_eq_table *table = &dev->priv.eq_table;
    struct mlx5_eq *eq;
    int err = -ENOENT;

    spin_lock(&table->lock);
    list_for_each_entry(eq, &table->comp_eqs_list, list) {
        if (eq->index == vector) {
            *eqn = eq->eqn;
            *irqn = eq->irqn;
            err = 0;
            break;
        }
    }
    spin_unlock(&table->lock);

    return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
    struct mlx5_eq_table *table = &dev->priv.eq_table;
    struct mlx5_eq *eq, *n;

    spin_lock(&table->lock);
    list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
        list_del(&eq->list);
        spin_unlock(&table->lock);
        if (mlx5_destroy_unmap_eq(dev, eq))
            mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
                           eq->eqn);
        kfree(eq);
        spin_lock(&table->lock);
    }
    spin_unlock(&table->lock);
}
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
    struct mlx5_eq_table *table = &dev->priv.eq_table;
    struct mlx5_eq *eq;
    int ncomp_vec;
    int nent;
    int err;
    int i;

    INIT_LIST_HEAD(&table->comp_eqs_list);
    ncomp_vec = table->num_comp_vectors;
    nent = MLX5_COMP_EQ_SIZE;
    for (i = 0; i < ncomp_vec; i++) {
        eq = kzalloc(sizeof(*eq), GFP_KERNEL);

        err = mlx5_create_map_eq(dev, eq,
                                 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
                                 &dev->priv.uuari.uars[0]);
        if (err) {
            kfree(eq);
            goto clean;
        }
        mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
        eq->index = i;
        spin_lock(&table->lock);
        list_add_tail(&eq->list, &table->comp_eqs_list);
        spin_unlock(&table->lock);
    }

    return 0;

clean:
    free_comp_eqs(dev);
    return err;
}
static int map_bf_area(struct mlx5_core_dev *dev)
{
    resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
    resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

    dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

    return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
    if (dev->priv.bf_mapping)
        io_mapping_free(dev->priv.bf_mapping);
}
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
    return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
			u32 warn_time_mili)
{
    int warn = jiffies + msecs_to_jiffies(warn_time_mili);
    int end = jiffies + msecs_to_jiffies(max_wait_mili);
    int err = 0;

    MPASS(max_wait_mili > warn_time_mili);

    while (fw_initializing(dev) == 1) {
        if (time_after(jiffies, end)) {
            err = -EBUSY;
            break;
        }
        if (warn_time_mili && time_after(jiffies, warn)) {
            mlx5_core_warn(dev,
                "Waiting for FW initialization, timeout abort in %u s\n",
                (unsigned int)(jiffies_to_msecs(end - warn) / 1000));
            warn = jiffies + msecs_to_jiffies(warn_time_mili);
        }
        msleep(FW_INIT_WAIT_MS);
    }

    if (err != 0)
        mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n",
            ioread32be(&dev->iseg->initializing));

    return err;
}
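
/*
 * Attach one registered interface (e.g. the Ethernet or Infiniband
 * client) to a core device: call its add() callback and keep the
 * returned context on the device's ctx_list so that events and
 * removal can later be dispatched to it.
 */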
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
    struct mlx5_device_context *dev_ctx;
    struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

    dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
    if (!dev_ctx)
        return;

    dev_ctx->intf    = intf;
    CURVNET_SET_QUIET(vnet0);
    dev_ctx->context = intf->add(dev);
    CURVNET_RESTORE();

    if (dev_ctx->context) {
        spin_lock_irq(&priv->ctx_lock);
        list_add_tail(&dev_ctx->list, &priv->ctx_list);
        spin_unlock_irq(&priv->ctx_lock);
    } else {
        kfree(dev_ctx);
    }
}
static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
    struct mlx5_device_context *dev_ctx;
    struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

    list_for_each_entry(dev_ctx, &priv->ctx_list, list)
        if (dev_ctx->intf == intf) {
            spin_lock_irq(&priv->ctx_lock);
            list_del(&dev_ctx->list);
            spin_unlock_irq(&priv->ctx_lock);

            intf->remove(dev, dev_ctx->context);
            kfree(dev_ctx);
            return;
        }
}
static int
mlx5_register_device(struct mlx5_core_dev *dev)
{
    struct mlx5_priv *priv = &dev->priv;
    struct mlx5_interface *intf;

    mutex_lock(&intf_mutex);
    list_add_tail(&priv->dev_list, &dev_list);
    list_for_each_entry(intf, &intf_list, list)
        mlx5_add_device(intf, priv);
    mutex_unlock(&intf_mutex);

    return 0;
}

static void
mlx5_unregister_device(struct mlx5_core_dev *dev)
{
    struct mlx5_priv *priv = &dev->priv;
    struct mlx5_interface *intf;

    mutex_lock(&intf_mutex);
    list_for_each_entry(intf, &intf_list, list)
        mlx5_remove_device(intf, priv);
    list_del(&priv->dev_list);
    mutex_unlock(&intf_mutex);
}
int mlx5_register_interface(struct mlx5_interface *intf)
{
    struct mlx5_priv *priv;

    if (!intf->add || !intf->remove)
        return -EINVAL;

    mutex_lock(&intf_mutex);
    list_add_tail(&intf->list, &intf_list);
    list_for_each_entry(priv, &dev_list, dev_list)
        mlx5_add_device(intf, priv);
    mutex_unlock(&intf_mutex);

    return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
    struct mlx5_priv *priv;

    mutex_lock(&intf_mutex);
    list_for_each_entry(priv, &dev_list, dev_list)
        mlx5_remove_device(intf, priv);
    list_del(&intf->list);
    mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
    struct mlx5_priv *priv = &mdev->priv;
    struct mlx5_device_context *dev_ctx;
    unsigned long flags;
    void *result = NULL;

    spin_lock_irqsave(&priv->ctx_lock, flags);

    list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
        if ((dev_ctx->intf->protocol == protocol) &&
            dev_ctx->intf->get_dev) {
            result = dev_ctx->intf->get_dev(dev_ctx->context);
            break;
        }

    spin_unlock_irqrestore(&priv->ctx_lock, flags);

    return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);
static int mlx5_auto_fw_update;
SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &mlx5_auto_fw_update, 0,
    "Allow automatic firmware update on driver start");
static int
mlx5_firmware_update(struct mlx5_core_dev *dev)
{
    const struct firmware *fw;
    int err;

    TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update);
    if (!mlx5_auto_fw_update)
        return (0);
    fw = firmware_get("mlx5fw_mfa");
    if (fw) {
        err = mlx5_firmware_flash(dev, fw);
        firmware_put(fw, FIRMWARE_UNLOAD);
        if (err)
            return (err);
    }

    return (0);
}
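
/*
 * Early PCI setup: enable the device, claim BAR 0, program the DMA
 * masks and map the initialization segment.
 */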
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
    struct pci_dev *pdev = dev->pdev;
    device_t bsddev;
    int err = 0;

    bsddev = pdev->dev.bsddev;
    pci_set_drvdata(dev->pdev, dev);
    strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
    priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

    mutex_init(&priv->pgdir_mutex);
    INIT_LIST_HEAD(&priv->pgdir_list);
    spin_lock_init(&priv->mkey_lock);

    priv->numa_node = NUMA_NO_NODE;

    err = mlx5_pci_enable_device(dev);
    if (err) {
        mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
        return err;
    }

    err = request_bar(pdev);
    if (err) {
        mlx5_core_err(dev, "error requesting BARs, aborting\n");
        goto err_disable;
    }

    pci_set_master(pdev);

    err = set_dma_caps(pdev);
    if (err) {
        mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
        goto err_clr_master;
    }

    dev->iseg_base = pci_resource_start(dev->pdev, 0);
    dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
    if (!dev->iseg) {
        err = -ENOMEM;
        mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
        goto err_clr_master;
    }

    return 0;

err_clr_master:
    release_bar(dev->pdev);
err_disable:
    mlx5_pci_disable_device(dev);
    return err;
}
static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
#ifdef PCI_IOV
    if (MLX5_CAP_GEN(dev, eswitch_flow_table))
        pci_iov_detach(dev->pdev->dev.bsddev);
#endif
    iounmap(dev->iseg);
    release_bar(dev->pdev);
    mlx5_pci_disable_device(dev);
}
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
    int err;

    err = mlx5_vsc_find_cap(dev);
    if (err)
        mlx5_core_err(dev, "Unable to find vendor specific capabilities\n");

    err = mlx5_query_hca_caps(dev);
    if (err) {
        mlx5_core_err(dev, "query hca failed\n");
        goto out;
    }

    err = mlx5_query_board_id(dev);
    if (err) {
        mlx5_core_err(dev, "query board id failed\n");
        goto out;
    }

    err = mlx5_eq_init(dev);
    if (err) {
        mlx5_core_err(dev, "failed to initialize eq\n");
        goto out;
    }

    MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

    err = mlx5_init_cq_table(dev);
    if (err) {
        mlx5_core_err(dev, "failed to initialize cq table\n");
        goto err_eq_cleanup;
    }

    mlx5_init_qp_table(dev);
    mlx5_init_srq_table(dev);
    mlx5_init_mr_table(dev);

    mlx5_init_reserved_gids(dev);
    mlx5_fpga_init(dev);

    err = mlx5_init_rl_table(dev);
    if (err) {
        mlx5_core_err(dev, "Failed to init rate limiting\n");
        goto err_tables_cleanup;
    }

    return 0;

err_tables_cleanup:
    mlx5_cleanup_mr_table(dev);
    mlx5_cleanup_srq_table(dev);
    mlx5_cleanup_qp_table(dev);
    mlx5_cleanup_cq_table(dev);

err_eq_cleanup:
    mlx5_eq_cleanup(dev);

out:
    return err;
}
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
    mlx5_cleanup_rl_table(dev);
    mlx5_fpga_cleanup(dev);
    mlx5_cleanup_reserved_gids(dev);
    mlx5_cleanup_mr_table(dev);
    mlx5_cleanup_srq_table(dev);
    mlx5_cleanup_qp_table(dev);
    mlx5_cleanup_cq_table(dev);
    mlx5_eq_cleanup(dev);
}
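
/*
 * Bring the device up: wait for firmware readiness, open the command
 * interface, enable the HCA, negotiate ISSI and capabilities, supply
 * boot and init pages, then start EQs, flow steering, MPFS and the
 * FPGA before exposing the device to registered interfaces. The error
 * labels unwind these steps in reverse order.
 */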
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			 bool boot)
{
    int err;

    mutex_lock(&dev->intf_state_mutex);
    if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
        mlx5_core_warn(dev, "interface is up, NOP\n");
        goto out;
    }

    mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n",
        fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

    /*
     * On load removing any previous indication of internal error,
     * device is up
     */
    dev->state = MLX5_DEVICE_STATE_UP;

    /* wait for firmware to accept initialization segments configurations
     */
    err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI,
        FW_INIT_WARN_MESSAGE_INTERVAL);
    if (err) {
        dev_err(&dev->pdev->dev,
            "Firmware over %d MS in pre-initializing state, aborting\n",
            FW_PRE_INIT_TIMEOUT_MILI);
        goto out_err;
    }

    err = mlx5_cmd_init(dev);
    if (err) {
        mlx5_core_err(dev,
            "Failed initializing command interface, aborting\n");
        goto out_err;
    }

    err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
    if (err) {
        mlx5_core_err(dev,
            "Firmware over %d MS in initializing state, aborting\n",
            FW_INIT_TIMEOUT_MILI);
        goto err_cmd_cleanup;
    }

    err = mlx5_core_enable_hca(dev, 0);
    if (err) {
        mlx5_core_err(dev, "enable hca failed\n");
        goto err_cmd_cleanup;
    }

    err = mlx5_core_set_issi(dev);
    if (err) {
        mlx5_core_err(dev, "failed to set issi\n");
        goto err_disable_hca;
    }

    err = mlx5_pagealloc_start(dev);
    if (err) {
        mlx5_core_err(dev, "mlx5_pagealloc_start failed\n");
        goto err_disable_hca;
    }

    err = mlx5_satisfy_startup_pages(dev, 1);
    if (err) {
        mlx5_core_err(dev, "failed to allocate boot pages\n");
        goto err_pagealloc_stop;
    }

    err = set_hca_ctrl(dev);
    if (err) {
        mlx5_core_err(dev, "set_hca_ctrl failed\n");
        goto reclaim_boot_pages;
    }

    err = handle_hca_cap(dev);
    if (err) {
        mlx5_core_err(dev, "handle_hca_cap failed\n");
        goto reclaim_boot_pages;
    }

    err = handle_hca_cap_atomic(dev);
    if (err) {
        mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
        goto reclaim_boot_pages;
    }

    err = mlx5_satisfy_startup_pages(dev, 0);
    if (err) {
        mlx5_core_err(dev, "failed to allocate init pages\n");
        goto reclaim_boot_pages;
    }

    err = mlx5_cmd_init_hca(dev);
    if (err) {
        mlx5_core_err(dev, "init hca failed\n");
        goto reclaim_boot_pages;
    }

    mlx5_start_health_poll(dev);

    if (boot && mlx5_init_once(dev, priv)) {
        mlx5_core_err(dev, "sw objs init failed\n");
        goto err_stop_poll;
    }

    err = mlx5_enable_msix(dev);
    if (err) {
        mlx5_core_err(dev, "enable msix failed\n");
        goto err_cleanup_once;
    }

    err = mlx5_alloc_uuars(dev, &priv->uuari);
    if (err) {
        mlx5_core_err(dev, "Failed allocating uar, aborting\n");
        goto err_disable_msix;
    }

    err = mlx5_start_eqs(dev);
    if (err) {
        mlx5_core_err(dev, "Failed to start pages and async EQs\n");
        goto err_free_uar;
    }

    err = alloc_comp_eqs(dev);
    if (err) {
        mlx5_core_err(dev, "Failed to alloc completion EQs\n");
        goto err_stop_eqs;
    }

    if (map_bf_area(dev))
        mlx5_core_err(dev, "Failed to map blue flame area\n");

    err = mlx5_init_fs(dev);
    if (err) {
        mlx5_core_err(dev, "flow steering init %d\n", err);
        goto err_free_comp_eqs;
    }

    err = mlx5_mpfs_init(dev);
    if (err) {
        mlx5_core_err(dev, "mpfs init failed %d\n", err);
        goto err_fs;
    }

    err = mlx5_fpga_device_start(dev);
    if (err) {
        mlx5_core_err(dev, "fpga device start failed %d\n", err);
        goto err_mpfs;
    }

    err = mlx5_register_device(dev);
    if (err) {
        mlx5_core_err(dev, "mlx5_register_device failed %d\n", err);
        goto err_fpga;
    }

    set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

out:
    mutex_unlock(&dev->intf_state_mutex);
    return 0;

err_fpga:
    mlx5_fpga_device_stop(dev);

err_mpfs:
    mlx5_mpfs_destroy(dev);

err_fs:
    mlx5_cleanup_fs(dev);

err_free_comp_eqs:
    free_comp_eqs(dev);
    unmap_bf_area(dev);

err_stop_eqs:
    mlx5_stop_eqs(dev);

err_free_uar:
    mlx5_free_uuars(dev, &priv->uuari);

err_disable_msix:
    mlx5_disable_msix(dev);

err_cleanup_once:
    if (boot)
        mlx5_cleanup_once(dev);

err_stop_poll:
    mlx5_stop_health_poll(dev, boot);
    if (mlx5_cmd_teardown_hca(dev)) {
        mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
        goto out_err;
    }

reclaim_boot_pages:
    mlx5_reclaim_startup_pages(dev);

err_pagealloc_stop:
    mlx5_pagealloc_stop(dev);

err_disable_hca:
    mlx5_core_disable_hca(dev);

err_cmd_cleanup:
    mlx5_cmd_cleanup(dev);

out_err:
    dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
    mutex_unlock(&dev->intf_state_mutex);
    return err;
}
static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			   bool cleanup)
{
    int err = 0;

    if (cleanup)
        mlx5_drain_health_recovery(dev);

    mutex_lock(&dev->intf_state_mutex);
    if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
        mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__);
        if (cleanup)
            mlx5_cleanup_once(dev);
        goto out;
    }

    mlx5_unregister_device(dev);

    mlx5_eswitch_cleanup(dev->priv.eswitch);
    mlx5_fpga_device_stop(dev);
    mlx5_mpfs_destroy(dev);
    mlx5_cleanup_fs(dev);
    unmap_bf_area(dev);
    mlx5_wait_for_reclaim_vfs_pages(dev);
    free_comp_eqs(dev);
    mlx5_stop_eqs(dev);
    mlx5_free_uuars(dev, &priv->uuari);
    mlx5_disable_msix(dev);
    if (cleanup)
        mlx5_cleanup_once(dev);
    mlx5_stop_health_poll(dev, cleanup);
    err = mlx5_cmd_teardown_hca(dev);
    if (err) {
        mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
        goto out;
    }
    mlx5_pagealloc_stop(dev);
    mlx5_reclaim_startup_pages(dev);
    mlx5_core_disable_hca(dev);
    mlx5_cmd_cleanup(dev);

out:
    clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
    mutex_unlock(&dev->intf_state_mutex);
    return err;
}
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
    struct mlx5_priv *priv = &dev->priv;
    struct mlx5_device_context *dev_ctx;
    unsigned long flags;

    spin_lock_irqsave(&priv->ctx_lock, flags);

    list_for_each_entry(dev_ctx, &priv->ctx_list, list)
        if (dev_ctx->intf->event)
            dev_ctx->intf->event(dev, dev_ctx->context, event, param);

    spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
    void (*event)(struct mlx5_core_dev *dev,
                  enum mlx5_dev_event event,
                  void *data);
};
#define MLX5_STATS_DESC(a, b, c, d, e, ...) d, e,

#define MLX5_PORT_MODULE_ERROR_STATS(m)				\
m(+1, u64, power_budget_exceeded, "power_budget", "Module Power Budget Exceeded") \
m(+1, u64, long_range, "long_range", "Module Long Range for non MLNX cable/module") \
m(+1, u64, bus_stuck, "bus_stuck", "Module Bus stuck(I2C or data shorted)") \
m(+1, u64, no_eeprom, "no_eeprom", "No EEPROM/retry timeout") \
m(+1, u64, enforce_part_number, "enforce_part_number", "Module Enforce part number list") \
m(+1, u64, unknown_id, "unknown_id", "Module Unknown identifier") \
m(+1, u64, high_temp, "high_temp", "Module High Temperature") \
m(+1, u64, cable_shorted, "cable_shorted", "Module Cable is shorted")

static const char *mlx5_pme_err_desc[] = {
    MLX5_PORT_MODULE_ERROR_STATS(MLX5_STATS_DESC)
};
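
/*
 * PCI probe entry point: allocate the core device, publish the sysctl
 * tree (port module event statistics and raw capability dumps), load
 * the device and, where supported, attach SR-IOV.
 */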
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
    struct mlx5_core_dev *dev;
    struct mlx5_priv *priv;
    device_t bsddev = pdev->dev.bsddev;
#ifdef PCI_IOV
    nvlist_t *pf_schema, *vf_schema;
    int num_vfs, sriov_pos;
#endif
    int i, err;
    struct sysctl_oid *pme_sysctl_node;
    struct sysctl_oid *pme_err_sysctl_node;
    struct sysctl_oid *cap_sysctl_node;
    struct sysctl_oid *current_cap_sysctl_node;
    struct sysctl_oid *max_cap_sysctl_node;
    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
    priv = &dev->priv;
    priv->pci_dev_data = id->driver_data;

    if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) {
        device_printf(bsddev,
            "WARN: selected profile out of range, selecting default (%d)\n",
            MLX5_DEFAULT_PROF);
        mlx5_prof_sel = MLX5_DEFAULT_PROF;
    }
    dev->profile = &profiles[mlx5_prof_sel];
    dev->pdev = pdev;
    dev->event = mlx5_core_event;

    /* Set desc */
    device_set_desc(bsddev, mlx5_version);
    sysctl_ctx_init(&dev->sysctl_ctx);
    SYSCTL_ADD_INT(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
        OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
        "Maximum number of MSIX event queue vectors, if set");
    SYSCTL_ADD_INT(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
        OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0,
        "0:Invalid 1:Sufficient 2:Insufficient");
    SYSCTL_ADD_INT(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
        OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0,
        "Current power value in Watts");

    pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
        OID_AUTO, "pme_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
        "Port module event statistics");
    if (pme_sysctl_node == NULL) {
        err = -ENOMEM;
        goto clean_sysctl_ctx;
    }
    pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(pme_sysctl_node),
        OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
        "Port module event error statistics");
    if (pme_err_sysctl_node == NULL) {
        err = -ENOMEM;
        goto clean_sysctl_ctx;
    }
    SYSCTL_ADD_U64(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
        "module_plug", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED],
        0, "Number of times module plugged");
    SYSCTL_ADD_U64(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
        "module_unplug", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED],
        0, "Number of times module unplugged");
    for (i = 0; i < MLX5_MODULE_EVENT_ERROR_NUM; i++) {
        SYSCTL_ADD_U64(&dev->sysctl_ctx,
            SYSCTL_CHILDREN(pme_err_sysctl_node), OID_AUTO,
            mlx5_pme_err_desc[2 * i], CTLFLAG_RD | CTLFLAG_MPSAFE,
            &dev->priv.pme_stats.error_counters[i],
            0, mlx5_pme_err_desc[2 * i + 1]);
    }
    cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
        OID_AUTO, "caps", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
        "hardware capabilities raw bitstrings");
    if (cap_sysctl_node == NULL) {
        err = -ENOMEM;
        goto clean_sysctl_ctx;
    }
    current_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(cap_sysctl_node),
        OID_AUTO, "current", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
        "");
    if (current_cap_sysctl_node == NULL) {
        err = -ENOMEM;
        goto clean_sysctl_ctx;
    }
    max_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(cap_sysctl_node),
        OID_AUTO, "max", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
        "");
    if (max_cap_sysctl_node == NULL) {
        err = -ENOMEM;
        goto clean_sysctl_ctx;
    }
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_GENERAL],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_GENERAL],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_ODP],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_ODP],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_ATOMIC],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_ATOMIC],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_ROCE],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_ROCE],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_IPOIB_OFFLOADS],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_IPOIB_OFFLOADS],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_FLOW_TABLE],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_ESWITCH],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_ESWITCH],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_SNAPSHOT],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_SNAPSHOT],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_VECTOR_CALC],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_VECTOR_CALC],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_QOS],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_QOS],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(current_cap_sysctl_node),
        OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_cur[MLX5_CAP_DEBUG],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(max_cap_sysctl_node),
        OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->hca_caps_max[MLX5_CAP_DEBUG],
        MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(cap_sysctl_node),
        OID_AUTO, "pcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->caps.pcam, sizeof(dev->caps.pcam), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(cap_sysctl_node),
        OID_AUTO, "mcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->caps.mcam, sizeof(dev->caps.mcam), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(cap_sysctl_node),
        OID_AUTO, "qcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->caps.qcam, sizeof(dev->caps.qcam), "IU", "");
    SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
        SYSCTL_CHILDREN(cap_sysctl_node),
        OID_AUTO, "fpga", CTLFLAG_RD | CTLFLAG_MPSAFE,
        &dev->caps.fpga, sizeof(dev->caps.fpga), "IU", "");
    INIT_LIST_HEAD(&priv->ctx_list);
    spin_lock_init(&priv->ctx_lock);
    mutex_init(&dev->pci_status_mutex);
    mutex_init(&dev->intf_state_mutex);
    mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW);

    err = mlx5_pci_init(dev, priv);
    if (err) {
        mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err);
        goto clean_dev;
    }

    err = mlx5_health_init(dev);
    if (err) {
        mlx5_core_err(dev, "mlx5_health_init failed %d\n", err);
        goto close_pci;
    }

    mlx5_pagealloc_init(dev);

    err = mlx5_load_one(dev, priv, true);
    if (err) {
        mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);
        goto clean_health;
    }

    mlx5_fwdump_prep(dev);

    mlx5_firmware_update(dev);
#ifdef PCI_IOV
    if (MLX5_CAP_GEN(dev, vport_group_manager)) {
        if (pci_find_extcap(bsddev, PCIZ_SRIOV, &sriov_pos) == 0) {
            num_vfs = pci_read_config(bsddev, sriov_pos +
                PCIR_SRIOV_TOTAL_VFS, 2);
        } else {
            mlx5_core_info(dev, "cannot find SR-IOV PCIe cap\n");
            num_vfs = 0;
        }
        err = mlx5_eswitch_init(dev, 1 + num_vfs);
        if (err == 0) {
            pf_schema = pci_iov_schema_alloc_node();
            vf_schema = pci_iov_schema_alloc_node();
            pci_iov_schema_add_unicast_mac(vf_schema,
                iov_mac_addr_name, 0, NULL);
            err = pci_iov_attach(bsddev, pf_schema, vf_schema);
            if (err != 0) {
                device_printf(bsddev,
                    "Failed to initialize SR-IOV support, error %d\n",
                    err);
            }
        } else {
            mlx5_core_err(dev, "eswitch init failed, error %d\n",
                err);
        }
    }
#endif

    pci_save_state(bsddev);
    return 0;

clean_health:
    mlx5_pagealloc_cleanup(dev);
    mlx5_health_cleanup(dev);
close_pci:
    mlx5_pci_close(dev, priv);
clean_dev:
    mtx_destroy(&dev->dump_lock);
clean_sysctl_ctx:
    sysctl_ctx_free(&dev->sysctl_ctx);
    kfree(dev);
    return err;
}
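
/* PCI detach entry point: unload the device and release all resources. */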
static void remove_one(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_priv *priv = &dev->priv;

    if (mlx5_unload_one(dev, priv, true)) {
        mlx5_core_err(dev, "mlx5_unload_one failed\n");
        mlx5_health_cleanup(dev);
        return;
    }

    mlx5_pagealloc_cleanup(dev);
    mlx5_health_cleanup(dev);
    mlx5_fwdump_clean(dev);
    mlx5_pci_close(dev, priv);
    mtx_destroy(&dev->dump_lock);
    pci_set_drvdata(pdev, NULL);
    sysctl_ctx_free(&dev->sysctl_ctx);
    kfree(dev);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_priv *priv = &dev->priv;

    mlx5_core_info(dev, "%s was called\n", __func__);
    mlx5_enter_error_state(dev, false);
    mlx5_unload_one(dev, priv, false);

    if (state) {
        mlx5_drain_health_wq(dev);
        mlx5_pci_disable_device(dev);
    }

    return state == pci_channel_io_perm_failure ?
        PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    int err = 0;

    mlx5_core_info(dev, "%s was called\n", __func__);

    err = mlx5_pci_enable_device(dev);
    if (err) {
        mlx5_core_err(dev, "mlx5_pci_enable_device failed with error code: %d\n",
            err);
        return PCI_ERS_RESULT_DISCONNECT;
    }
    pci_set_master(pdev);
    pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
    pci_restore_state(pdev->dev.bsddev);
    pci_save_state(pdev->dev.bsddev);

    return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
/* wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non zero value which is different than 0xffffffff
 */
static void wait_vital(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_core_health *health = &dev->priv.health;
    const int niter = 100;
    u32 count;
    u16 did;
    int i;

    /* Wait for firmware to be ready after reset */
    msleep(1000);
    for (i = 0; i < niter; i++) {
        if (pci_read_config_word(pdev, 2, &did)) {
            mlx5_core_warn(dev, "failed reading config word\n");
            break;
        }
        if (did == pdev->device) {
            mlx5_core_info(dev,
                "device ID correctly read after %d iterations\n", i);
            break;
        }
        msleep(50);
    }

    if (i == niter)
        mlx5_core_warn(dev, "could not read device ID\n");

    for (i = 0; i < niter; i++) {
        count = ioread32be(health->health_counter);
        if (count && count != 0xffffffff) {
            mlx5_core_info(dev,
                "Counter value 0x%x after %d iterations\n", count, i);
            break;
        }
        msleep(50);
    }

    if (i == niter)
        mlx5_core_warn(dev, "could not read health counter\n");
}
static void mlx5_pci_resume(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_priv *priv = &dev->priv;
    int err;

    mlx5_core_info(dev, "%s was called\n", __func__);

    wait_vital(pdev);

    err = mlx5_load_one(dev, priv, false);
    if (err)
        mlx5_core_err(dev,
            "mlx5_load_one failed with error code: %d\n", err);
    else
        mlx5_core_info(dev, "device recovered\n");
}

static const struct pci_error_handlers mlx5_err_handler = {
    .error_detected = mlx5_pci_err_detected,
    .slot_reset	= mlx5_pci_slot_reset,
    .resume		= mlx5_pci_resume
};
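
/*
 * SR-IOV callbacks invoked through the pci_iov(4) framework when the
 * administrator creates or destroys virtual functions.
 */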
#ifdef PCI_IOV
static int
mlx5_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
    struct pci_dev *pdev;
    struct mlx5_core_dev *core_dev;
    struct mlx5_priv *priv;
    int err;

    pdev = device_get_softc(dev);
    core_dev = pci_get_drvdata(pdev);
    priv = &core_dev->priv;

    if (priv->eswitch == NULL)
        return (ENODEV);
    if (priv->eswitch->total_vports < num_vfs + 1)
        num_vfs = priv->eswitch->total_vports - 1;
    err = mlx5_eswitch_enable_sriov(priv->eswitch, num_vfs);
    return (-err);
}

static void
mlx5_iov_uninit(device_t dev)
{
    struct pci_dev *pdev;
    struct mlx5_core_dev *core_dev;
    struct mlx5_priv *priv;

    pdev = device_get_softc(dev);
    core_dev = pci_get_drvdata(pdev);
    priv = &core_dev->priv;

    mlx5_eswitch_disable_sriov(priv->eswitch);
}

static int
mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
    struct pci_dev *pdev;
    struct mlx5_core_dev *core_dev;
    struct mlx5_priv *priv;
    const void *mac;
    size_t mac_size;
    int error;

    pdev = device_get_softc(dev);
    core_dev = pci_get_drvdata(pdev);
    priv = &core_dev->priv;

    if (vfnum + 1 >= priv->eswitch->total_vports)
        return (ENXIO);

    if (nvlist_exists_binary(vf_config, iov_mac_addr_name)) {
        mac = nvlist_get_binary(vf_config, iov_mac_addr_name,
            &mac_size);
        error = -mlx5_eswitch_set_vport_mac(priv->eswitch,
            vfnum + 1, __DECONST(u8 *, mac));
        if (error != 0) {
            mlx5_core_err(core_dev,
                "setting MAC for VF %d failed, error %d\n",
                vfnum + 1, error);
        }
    }

    error = -mlx5_eswitch_set_vport_state(priv->eswitch, vfnum + 1,
        VPORT_STATE_FOLLOW);
    if (error != 0) {
        mlx5_core_err(core_dev,
            "upping vport for VF %d failed, error %d\n",
            vfnum + 1, error);
    }
    error = -mlx5_core_enable_hca(core_dev, vfnum + 1);
    if (error != 0) {
        mlx5_core_err(core_dev, "enabling VF %d failed, error %d\n",
            vfnum + 1, error);
    }
    return (error);
}
#endif
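
/*
 * Attempt to shorten shutdown by asking the firmware for a fast (or,
 * failing that, forced) teardown instead of a full unload; on success
 * the device is put into the error state so no further commands are
 * issued.
 */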
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
    bool fast_teardown, force_teardown;
    int err;

    if (!mlx5_fast_unload_enabled) {
        mlx5_core_dbg(dev, "fast unload is disabled by user\n");
        return -EOPNOTSUPP;
    }

    fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
    force_teardown = MLX5_CAP_GEN(dev, force_teardown);

    mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
    mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

    if (!fast_teardown && !force_teardown)
        return -EOPNOTSUPP;

    if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
        mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
        return -EAGAIN;
    }

    /* Panic tear down fw command will stop the PCI bus communication
     * with the HCA, so the health poll is no longer needed.
     */
    mlx5_drain_health_wq(dev);
    mlx5_stop_health_poll(dev, false);

    err = mlx5_cmd_fast_teardown_hca(dev);
    if (!err)
        goto done;

    err = mlx5_cmd_force_teardown_hca(dev);
    if (!err)
        goto done;

    mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
    mlx5_start_health_poll(dev);
    return err;
done:
    mlx5_enter_error_state(dev, true);
    return 0;
}
static void mlx5_shutdown_disable_interrupts(struct mlx5_core_dev *mdev)
{
    int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
    int x;

    mdev->priv.disable_irqs = 1;

    /* wait for all IRQ handlers to finish processing */
    for (x = 0; x != nvec; x++)
        synchronize_irq(mdev->priv.msix_arr[x].vector);
}
static void shutdown_one(struct pci_dev *pdev)
{
    struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
    struct mlx5_priv *priv = &dev->priv;
    int err;

    /* enter polling mode */
    mlx5_cmd_use_polling(dev);

    set_bit(MLX5_INTERFACE_STATE_TEARDOWN, &dev->intf_state);

    /* disable all interrupts */
    mlx5_shutdown_disable_interrupts(dev);

    err = mlx5_try_fast_unload(dev);
    if (err) {
        mlx5_unload_one(dev, priv, false);
        mlx5_pci_disable_device(dev);
    }
}
static const struct pci_device_id mlx5_core_pci_table[] = {
    { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
    { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
    { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
    { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
    { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
    { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
    { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
    { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
    { PCI_VDEVICE(MELLANOX, 4121) },
    { PCI_VDEVICE(MELLANOX, 4122) },
    { PCI_VDEVICE(MELLANOX, 4123) },
    { PCI_VDEVICE(MELLANOX, 4124) },
    { PCI_VDEVICE(MELLANOX, 4125) },
    { PCI_VDEVICE(MELLANOX, 4126) },
    { PCI_VDEVICE(MELLANOX, 4127) },
    { PCI_VDEVICE(MELLANOX, 4128) },
    { PCI_VDEVICE(MELLANOX, 4129) },
    { PCI_VDEVICE(MELLANOX, 4130) },
    { PCI_VDEVICE(MELLANOX, 4131) },
    { PCI_VDEVICE(MELLANOX, 4132) },
    { PCI_VDEVICE(MELLANOX, 4133) },
    { PCI_VDEVICE(MELLANOX, 4134) },
    { PCI_VDEVICE(MELLANOX, 4135) },
    { PCI_VDEVICE(MELLANOX, 4136) },
    { PCI_VDEVICE(MELLANOX, 4137) },
    { PCI_VDEVICE(MELLANOX, 4138) },
    { PCI_VDEVICE(MELLANOX, 4139) },
    { PCI_VDEVICE(MELLANOX, 4140) },
    { PCI_VDEVICE(MELLANOX, 4141) },
    { PCI_VDEVICE(MELLANOX, 4142) },
    { PCI_VDEVICE(MELLANOX, 4143) },
    { PCI_VDEVICE(MELLANOX, 4144) },
    { }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
    mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
    mlx5_pci_disable_device(dev);
    if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
        mlx5_pci_resume(dev->pdev);
}
struct pci_driver mlx5_core_driver = {
    .name		= DRIVER_NAME,
    .id_table	= mlx5_core_pci_table,
    .shutdown	= shutdown_one,
    .probe		= init_one,
    .remove		= remove_one,
    .err_handler	= &mlx5_err_handler,
#ifdef PCI_IOV
    .bsd_iov_init	= mlx5_iov_init,
    .bsd_iov_uninit	= mlx5_iov_uninit,
    .bsd_iov_add_vf	= mlx5_iov_add_vf,
#endif
};
static int __init init(void)
{
    int err;

    err = pci_register_driver(&mlx5_core_driver);
    if (err)
        goto err_debug;

    err = mlx5_ctl_init();
    if (err)
        goto err_ctl;

    return 0;

err_ctl:
    pci_unregister_driver(&mlx5_core_driver);

err_debug:
    return err;
}

static void __exit cleanup(void)
{
    mlx5_ctl_fini();
    pci_unregister_driver(&mlx5_core_driver);
}

module_init(init);
module_exit(cleanup);