/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#define	LINUXKPI_PARAM_PREFIX mlx5_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
#endif
MODULE_VERSION(mlx5, 1);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
#define	MLX5_DEFAULT_PROF	2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

#define	NUMA_NO_NODE	-1

struct workqueue_struct *mlx5_core_wq;
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
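
/*
 * Interface/device registry: dev_list tracks probed core devices and
 * intf_list tracks registered client interfaces; both lists are serialized
 * by intf_mutex.  Each (device, interface) pairing gets a per-pair context,
 * defined below.
 */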
struct mlx5_device_context {
        struct list_head        list;
        struct mlx5_interface  *intf;
        void                   *context;
};
static struct mlx5_profile profiles[] = {
        [0] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE,
                /* ... */
        },
        [1] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE |
                                  MLX5_PROF_MASK_MR_CACHE,
                /* ... (per-size mr_cache limits) */
        },
        [2] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE,
                /* ... */
        },
};
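
/*
 * Program the widest DMA mask the platform accepts: try 64-bit for both
 * streaming and coherent mappings, fall back to 32-bit, then cap
 * scatter/gather segments at 2GB.
 */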
static int set_dma_caps(struct pci_dev *pdev)
{
        int err;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                device_printf(pdev->dev.bsddev, "WARN: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        device_printf(pdev->dev.bsddev, "ERR: Can't set PCI DMA mask, aborting\n");
                        return err;
                }
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                device_printf(pdev->dev.bsddev, "WARN: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        device_printf(pdev->dev.bsddev, "ERR: Can't set consistent PCI DMA mask, aborting\n");
                        return err;
                }
        }

        dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
        return 0;
}
static int request_bar(struct pci_dev *pdev)
{
        int err;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                device_printf(pdev->dev.bsddev, "ERR: Missing registers BAR, aborting\n");
                return -ENODEV;
        }

        err = pci_request_regions(pdev, DRIVER_NAME);
        if (err)
                device_printf(pdev->dev.bsddev, "ERR: Couldn't get PCI resources, aborting\n");

        return err;
}

static void release_bar(struct pci_dev *pdev)
{
        pci_release_regions(pdev);
}
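
/*
 * MSI-X sizing: request one completion vector per port per online CPU on
 * top of the MLX5_EQ_VEC_COMP_BASE control vectors, clamped to the number
 * of EQs the device advertises (log_max_eq).
 */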
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;
        int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int err = -ENOMEM;
        int nvec;
        int i;

        nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
            MLX5_EQ_VEC_COMP_BASE;
        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_EQ_VEC_COMP_BASE)
                return -ENOMEM;

        priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);
        priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL);
        if (!priv->msix_arr || !priv->irq_info)
                goto err_free_arrays;

        for (i = 0; i < nvec; i++)
                priv->msix_arr[i].entry = i;

        nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
            MLX5_EQ_VEC_COMP_BASE + 1, nvec);
        if (nvec < 0) {
                err = nvec;
                goto err_free_arrays;
        }

        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

        return 0;

err_free_arrays:
        kfree(priv->irq_info);
        kfree(priv->msix_arr);
        return err;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        pci_disable_msix(dev->pdev);
        kfree(priv->irq_info);
        kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
        u8      he;
        u8      rsvd[15];
};

#define	CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
        MLX5_CAP_BITS_RW_MASK   = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
                                  MLX5_DEV_CAP_FLAG_DCT |
                                  MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};
/* Map a pkey table size in entries to the firmware encoding (0..5). */
static u16 to_fw_pkey_sz(u32 size)
{
        switch (size) {
        case 128:
                return 0;
        case 256:
                return 1;
        case 512:
                return 2;
        case 1024:
                return 3;
        case 2048:
                return 4;
        case 4096:
                return 5;
        default:
                printf("mlx5_core: WARN: invalid pkey table size %d\n", size);
                return 0;
        }
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
                       enum mlx5_cap_mode cap_mode)
{
        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        void *out, *hca_caps;
        u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
        int err;

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto query_ex;

        err = mlx5_cmd_status_to_err_v2(out);
        if (err) {
                mlx5_core_warn(dev,
                    "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
                    cap_type, cap_mode, err);
                goto query_ex;
        }

        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

        switch (cap_mode) {
        case HCA_CAP_OPMOD_GET_MAX:
                memcpy(dev->hca_caps_max[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        case HCA_CAP_OPMOD_GET_CUR:
                memcpy(dev->hca_caps_cur[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        default:
                mlx5_core_warn(dev,
                    "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
                    cap_type, cap_mode);
                err = -EINVAL;
                break;
        }

query_ex:
        kfree(out);
        return err;
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
        u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
        int err;

        memset(out, 0, sizeof(out));

        MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
        if (err)
                return err;

        err = mlx5_cmd_status_to_err_v2(out);

        return err;
}
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
        void *set_ctx = NULL;
        struct mlx5_profile *prof = dev->profile;
        void *set_hca_cap;
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        int err = -ENOMEM;

        set_ctx = kzalloc(set_sz, GFP_KERNEL);
        if (!set_ctx)
                goto query_ex;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
        if (err)
                goto query_ex;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
        if (err)
                goto query_ex;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
                                   capability);
        memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
               MLX5_ST_SZ_BYTES(cmd_hca_cap));

        mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
                      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
                      128);
        /* we limit the size of the pkey table to 128 entries for now */
        MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
                 to_fw_pkey_sz(128));

        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
                MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
                         prof->log_max_qp);

        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

        /* enable drain sigerr */
        MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

        MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

        err = set_caps(dev, set_ctx, set_sz);

query_ex:
        kfree(set_ctx);
        return err;
}
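
/*
 * Report the host byte order through the HOST_ENDIANNESS access register.
 * Ethernet-only configurations without RoCE skip the write, as the
 * capability check below shows.
 */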
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
        struct mlx5_reg_host_endianess he_in;
        struct mlx5_reg_host_endianess he_out;
        int err;

        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
            !MLX5_CAP_GEN(dev, roce))
                return 0;

        memset(&he_in, 0, sizeof(he_in));
        he_in.he = MLX5_SET_HOST_ENDIANNESS;
        err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
                                   &he_out, sizeof(he_out),
                                   MLX5_REG_HOST_ENDIANNESS, 0, 1);
        return err;
}
static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
        u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
        u32 out[MLX5_ST_SZ_DW(enable_hca_out)];

        memset(in, 0, sizeof(in));
        MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
        memset(out, 0, sizeof(out));
        return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
                                          out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
        u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
        u32 out[MLX5_ST_SZ_DW(disable_hca_out)];

        memset(in, 0, sizeof(in));
        MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
        memset(out, 0, sizeof(out));
        return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
                                          out, sizeof(out));
}
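
/*
 * ISSI (Issue Software Interface) negotiation: query the mask of interface
 * versions the firmware supports, switch to ISSI 1 when offered, accept
 * ISSI 0 otherwise.  Firmware that predates QUERY_ISSI answers
 * MLX5_CMD_STAT_BAD_OP_ERR, which is treated as ISSI 0.
 */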
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
        u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
        u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
        u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
        u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
        u32 sup_issi;
        int err;

        memset(query_in, 0, sizeof(query_in));
        memset(query_out, 0, sizeof(query_out));

        MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

        err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
                                         query_out, sizeof(query_out));
        if (err) {
                if (((struct mlx5_outbox_hdr *)query_out)->status ==
                    MLX5_CMD_STAT_BAD_OP_ERR) {
                        pr_debug("Only ISSI 0 is supported\n");
                        return 0;
                }

                printf("mlx5_core: ERR: failed to query ISSI\n");
                return err;
        }

        sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

        if (sup_issi & (1 << 1)) {
                memset(set_in, 0, sizeof(set_in));
                memset(set_out, 0, sizeof(set_out));

                MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
                MLX5_SET(set_issi_in, set_in, current_issi, 1);

                err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
                                                 set_out, sizeof(set_out));
                if (err) {
                        printf("mlx5_core: ERR: failed to set ISSI=1\n");
                        return err;
                }

                dev->issi = 1;

                return 0;
        } else if (sup_issi & (1 << 0)) {
                return 0;
        }

        return -ENOTSUPP;
}
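
/*
 * Resolve a completion vector index to its EQ number and IRQ number.
 * Hypothetical caller sketch:
 *
 *	int eqn, irqn;
 *	int err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
 *
 * On success (err == 0), eqn/irqn identify the completion EQ servicing
 * 'vector'; otherwise -ENOENT is returned.
 */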
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_eq *eq;
        int err = -ENOENT;

        spin_lock(&table->lock);
        list_for_each_entry(eq, &table->comp_eqs_list, list) {
                if (eq->index == vector) {
                        *eqn = eq->eqn;
                        *irqn = eq->irqn;
                        err = 0;
                        break;
                }
        }
        spin_unlock(&table->lock);

        return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;
        struct mlx5_eq *eq;
        int err = -ENOENT;

        spin_lock(&table->lock);
        list_for_each_entry(eq, &table->comp_eqs_list, list) {
                if (eq->index == eq_ix) {
                        int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE;

                        snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME,
                                 "%s-%d", name, eq_ix);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&table->lock);

        return err;
}
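
/*
 * Completion EQ lifecycle: alloc_comp_eqs() creates one EQ per MSI-X
 * completion vector, named "mlx5_comp%d" and bound to UAR 0;
 * free_comp_eqs() drops the table lock around mlx5_destroy_unmap_eq()
 * because tearing down an EQ issues a firmware command.
 */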
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_eq *eq, *n;

        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                list_del(&eq->list);
                spin_unlock(&table->lock);
                if (mlx5_destroy_unmap_eq(dev, eq))
                        mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
                                       eq->eqn);
                kfree(eq);
                spin_lock(&table->lock);
        }
        spin_unlock(&table->lock);
}
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_eq *eq;
        int ncomp_vec;
        int nent;
        int err;
        int i;

        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_vec = table->num_comp_vectors;
        nent = MLX5_COMP_EQ_SIZE;
        for (i = 0; i < ncomp_vec; i++) {
                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
                        err = -ENOMEM;
                        goto clean;
                }

                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(dev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
                                         name, &dev->priv.uuari.uars[0]);
                if (err) {
                        kfree(eq);
                        goto clean;
                }
                mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
                eq->index = i;
                spin_lock(&table->lock);
                list_add_tail(&eq->list, &table->comp_eqs_list);
                spin_unlock(&table->lock);
        }

        return 0;

clean:
        free_comp_eqs(dev);
        return err;
}
static int map_bf_area(struct mlx5_core_dev *dev)
{
        resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
        resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

        dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

        return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
        if (dev->priv.bf_mapping)
                io_mapping_free(dev->priv.bf_mapping);
}
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
        return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
        u64 end = jiffies + msecs_to_jiffies(max_wait_mili);
        int err = 0;

        while (fw_initializing(dev)) {
                if (time_after(jiffies, end)) {
                        err = -EBUSY;
                        break;
                }
                msleep(FW_INIT_WAIT_MS);
        }
        return err;
}
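
/*
 * Device bring-up, mirrored in reverse by the error labels at the bottom
 * and by mlx5_dev_cleanup(): PCI enable -> BAR and DMA setup -> map the
 * init segment -> command interface -> wait for firmware -> ENABLE_HCA ->
 * ISSI -> boot/init pages -> capabilities -> INIT_HCA -> health poll ->
 * MSI-X, EQs and UARs -> resource tables -> flow steering.
 */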
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
        struct mlx5_priv *priv = &dev->priv;
        int err;

        dev->pdev = pdev;
        pci_set_drvdata(dev->pdev, dev);
        strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
        priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

        mutex_init(&priv->pgdir_mutex);
        INIT_LIST_HEAD(&priv->pgdir_list);
        spin_lock_init(&priv->mkey_lock);

        priv->numa_node = NUMA_NO_NODE;

        err = pci_enable_device(pdev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: Cannot enable PCI device, aborting\n");
                goto err_dbg;
        }

        err = request_bar(pdev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: error requesting BARs, aborting\n");
                goto err_disable;
        }

        pci_set_master(pdev);

        err = set_dma_caps(pdev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: Failed setting DMA capabilities mask, aborting\n");
                goto err_clr_master;
        }

        dev->iseg = ioremap(pci_resource_start(dev->pdev, 0),
                            sizeof(*dev->iseg));
        if (!dev->iseg) {
                err = -ENOMEM;
                device_printf(pdev->dev.bsddev, "ERR: Failed mapping initialization segment, aborting\n");
                goto err_clr_master;
        }
        device_printf(pdev->dev.bsddev, "INFO: firmware version: %d.%d.%d\n",
                      fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

        /*
         * On load removing any previous indication of internal error,
         * device is up
         */
        dev->state = MLX5_DEVICE_STATE_UP;

        err = mlx5_cmd_init(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: Failed initializing command interface, aborting\n");
                goto err_unmap;
        }

        err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: Firmware over %d MS in initializing state, aborting\n",
                              FW_INIT_TIMEOUT_MILI);
                goto err_cmd_cleanup;
        }

        mlx5_pagealloc_init(dev);

        err = mlx5_core_enable_hca(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: enable hca failed\n");
                goto err_pagealloc_cleanup;
        }

        err = mlx5_core_set_issi(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: failed to set issi\n");
                goto err_disable_hca;
        }

        err = mlx5_pagealloc_start(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: mlx5_pagealloc_start failed\n");
                goto err_disable_hca;
        }

        err = mlx5_satisfy_startup_pages(dev, 1);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: failed to allocate boot pages\n");
                goto err_pagealloc_stop;
        }

        err = handle_hca_cap(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: handle_hca_cap failed\n");
                goto reclaim_boot_pages;
        }

        err = set_hca_ctrl(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: set_hca_ctrl failed\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_satisfy_startup_pages(dev, 0);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: failed to allocate init pages\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_cmd_init_hca(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: init hca failed\n");
                goto reclaim_boot_pages;
        }

        mlx5_start_health_poll(dev);

        err = mlx5_query_hca_caps(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: query hca failed\n");
                goto err_stop_poll;
        }

        err = mlx5_query_board_id(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: query board id failed\n");
                goto err_stop_poll;
        }

        err = mlx5_enable_msix(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: enable msix failed\n");
                goto err_stop_poll;
        }

        err = mlx5_eq_init(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: failed to initialize eq\n");
                goto disable_msix;
        }

        err = mlx5_alloc_uuars(dev, &priv->uuari);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: Failed allocating uar, aborting\n");
                goto err_eq_cleanup;
        }

        err = mlx5_start_eqs(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: Failed to start pages and async EQs\n");
                goto err_free_uar;
        }

        err = alloc_comp_eqs(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: Failed to alloc completion EQs\n");
                goto err_stop_eqs;
        }

        if (map_bf_area(dev))
                device_printf(pdev->dev.bsddev, "ERR: Failed to map blue flame area\n");

        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

        mlx5_init_cq_table(dev);
        mlx5_init_qp_table(dev);
        mlx5_init_srq_table(dev);
        mlx5_init_mr_table(dev);

        err = mlx5_init_fs(dev);
        if (err) {
                mlx5_core_err(dev, "flow steering init %d\n", err);
                goto err_init_tables;
        }

        return 0;

err_init_tables:
        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        unmap_bf_area(dev);
        free_comp_eqs(dev);
err_stop_eqs:
        mlx5_stop_eqs(dev);
err_free_uar:
        mlx5_free_uuars(dev, &priv->uuari);
err_eq_cleanup:
        mlx5_eq_cleanup(dev);
disable_msix:
        mlx5_disable_msix(dev);
err_stop_poll:
        mlx5_stop_health_poll(dev);
        if (mlx5_cmd_teardown_hca(dev)) {
                device_printf(dev->pdev->dev.bsddev, "ERR: tear_down_hca failed, skip cleanup\n");
                return err;
        }
reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);
err_pagealloc_stop:
        mlx5_pagealloc_stop(dev);
err_disable_hca:
        mlx5_core_disable_hca(dev);
err_pagealloc_cleanup:
        mlx5_pagealloc_cleanup(dev);
err_cmd_cleanup:
        mlx5_cmd_cleanup(dev);
err_unmap:
        iounmap(dev->iseg);
err_clr_master:
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
err_disable:
        pci_disable_device(dev->pdev);
err_dbg:
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        return err;
}
static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        mlx5_cleanup_fs(dev);
        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        unmap_bf_area(dev);
        mlx5_wait_for_reclaim_vfs_pages(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
        mlx5_eq_cleanup(dev);
        mlx5_disable_msix(dev);
        mlx5_stop_health_poll(dev);
        if (mlx5_cmd_teardown_hca(dev)) {
                device_printf(dev->pdev->dev.bsddev, "ERR: tear_down_hca failed, skip cleanup\n");
                return;
        }
        mlx5_pagealloc_stop(dev);
        mlx5_reclaim_startup_pages(dev);
        mlx5_core_disable_hca(dev);
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);
        iounmap(dev->iseg);
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        pci_disable_device(dev->pdev);
}
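
/*
 * An interface context couples one registered interface with one device:
 * intf->add() runs for every existing device when an interface registers,
 * and for every registered interface when a new device is probed.  The
 * returned context is what intf->remove(), get_dev() and event callbacks
 * receive later.
 */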
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
        if (!dev_ctx)
                return;

        dev_ctx->intf = intf;
        dev_ctx->context = intf->add(dev);

        if (dev_ctx->context) {
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
                spin_unlock_irq(&priv->ctx_lock);
        } else {
                kfree(dev_ctx);
        }
}
static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf == intf) {
                        spin_lock_irq(&priv->ctx_lock);
                        list_del(&dev_ctx->list);
                        spin_unlock_irq(&priv->ctx_lock);

                        intf->remove(dev, dev_ctx->context);
                        kfree(dev_ctx);
                        return;
                }
}
static int mlx5_register_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&intf_mutex);
        list_add_tail(&priv->dev_list, &dev_list);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&intf_mutex);

        return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_remove_device(intf, priv);
        list_del(&priv->dev_list);
        mutex_unlock(&intf_mutex);
}
int mlx5_register_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        if (!intf->add || !intf->remove)
                return -EINVAL;

        mutex_lock(&intf_mutex);
        list_add_tail(&intf->list, &intf_list);
        list_for_each_entry(priv, &dev_list, dev_list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&intf_mutex);

        return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        mutex_lock(&intf_mutex);
        list_for_each_entry(priv, &dev_list, dev_list)
                mlx5_remove_device(intf, priv);
        list_del(&intf->list);
        mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
        struct mlx5_priv *priv = &mdev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;
        void *result = NULL;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
                if ((dev_ctx->intf->protocol == protocol) &&
                    dev_ctx->intf->get_dev) {
                        result = dev_ctx->intf->get_dev(dev_ctx->context);
                        break;
                }

        spin_unlock_irqrestore(&priv->ctx_lock, flags);

        return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);
static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                            unsigned long param)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf->event)
                        dev_ctx->intf->event(dev, dev_ctx->context, event, param);

        spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
        void (*event)(struct mlx5_core_dev *dev,
                      enum mlx5_dev_event event,
                      void *data);
};
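
/*
 * PCI probe entry point: validate prof_sel (falling back to
 * MLX5_DEFAULT_PROF when out of range), attach the event fan-out, run the
 * device bring-up and finally expose the device to registered interfaces.
 */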
static int init_one(struct pci_dev *pdev,
                    const struct pci_device_id *id)
{
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;
        int err;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        priv = &dev->priv;
        priv->pci_dev_data = id->driver_data;
        if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profiles)) {
                printf("mlx5_core: WARN: selected profile out of range, selecting default (%d)\n",
                       MLX5_DEFAULT_PROF);
                prof_sel = MLX5_DEFAULT_PROF;
        }
        dev->profile = &profiles[prof_sel];
        dev->event = mlx5_core_event;

        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
        err = mlx5_dev_init(dev, pdev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: mlx5_dev_init failed %d\n", err);
                goto out_free;
        }

        err = mlx5_register_device(dev);
        if (err) {
                device_printf(pdev->dev.bsddev, "ERR: mlx5_register_device failed %d\n", err);
                goto out_dev_cleanup;
        }

        return 0;

out_dev_cleanup:
        mlx5_dev_cleanup(dev);
out_free:
        kfree(dev);
        return err;
}
static void remove_one(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

        mlx5_unregister_device(dev);
        mlx5_dev_cleanup(dev);
        kfree(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
        { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
        { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
        { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
        { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
        { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
        { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
        { PCI_VDEVICE(MELLANOX, 4121) },
        { PCI_VDEVICE(MELLANOX, 4122) },
        { PCI_VDEVICE(MELLANOX, 4123) },
        { PCI_VDEVICE(MELLANOX, 4124) },
        { PCI_VDEVICE(MELLANOX, 4125) },
        { PCI_VDEVICE(MELLANOX, 4126) },
        { PCI_VDEVICE(MELLANOX, 4127) },
        { PCI_VDEVICE(MELLANOX, 4128) },
        { PCI_VDEVICE(MELLANOX, 4129) },
        { PCI_VDEVICE(MELLANOX, 4130) },
        { PCI_VDEVICE(MELLANOX, 4131) },
        { PCI_VDEVICE(MELLANOX, 4132) },
        { PCI_VDEVICE(MELLANOX, 4133) },
        { PCI_VDEVICE(MELLANOX, 4134) },
        { PCI_VDEVICE(MELLANOX, 4135) },
        { PCI_VDEVICE(MELLANOX, 4136) },
        { PCI_VDEVICE(MELLANOX, 4137) },
        { PCI_VDEVICE(MELLANOX, 4138) },
        { PCI_VDEVICE(MELLANOX, 4139) },
        { PCI_VDEVICE(MELLANOX, 4140) },
        { PCI_VDEVICE(MELLANOX, 4141) },
        { PCI_VDEVICE(MELLANOX, 4142) },
        { PCI_VDEVICE(MELLANOX, 4143) },
        { PCI_VDEVICE(MELLANOX, 4144) },
        { }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
static struct pci_driver mlx5_core_driver = {
        .name           = DRIVER_NAME,
        .id_table       = mlx5_core_pci_table,
        .probe          = init_one,
        .remove         = remove_one
};
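
/*
 * Module load creates the single-threaded core workqueue and health support
 * before registering the PCI driver; module unload reverses the order.
 */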
static int __init init(void)
{
        int err;

        mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
        if (!mlx5_core_wq) {
                err = -ENOMEM;
                goto err_debug;
        }
        mlx5_health_init();

        err = pci_register_driver(&mlx5_core_driver);
        if (err)
                goto err_health;

        return 0;

err_health:
        mlx5_health_cleanup();
        destroy_workqueue(mlx5_core_wq);
err_debug:
        return err;
}

static void __exit cleanup(void)
{
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_health_cleanup();
        destroy_workqueue(mlx5_core_wq);
}

module_init(init);
module_exit(cleanup);
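
/*
 * Move the device into the internal-error state exactly once and complete
 * outstanding commands so that callers blocked on the command interface
 * can unwind.
 */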
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
        if (dev->state != MLX5_DEVICE_STATE_UP)
                return;

        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        mlx5_trigger_cmd_completions(dev);
}
EXPORT_SYMBOL(mlx5_enter_error_state);