/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define	LINUXKPI_PARAM_PREFIX mlx5_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
#if (__FreeBSD_version >= 1100000)
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
#endif
MODULE_VERSION(mlx5, 1);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

#define NUMA_NO_NODE	-1

struct workqueue_struct *mlx5_core_wq;
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
};

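/*
 * Resource profiles, selected at load time through the "prof_sel"
 * module parameter. Profile 0 leaves the firmware defaults untouched,
 * profile 1 only caps the QP table, and profile 2 (the default)
 * additionally pre-populates the MR cache.
 */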
static struct mlx5_profile profiles[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 17,
		/* The per-bucket .mr_cache[] size/limit pairs are omitted here. */
	},
	[3] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 17,
	},
};

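/*
 * Prefer 64-bit DMA addressing and fall back to 32-bit masks when the
 * platform refuses; fail the probe only if neither mask can be set.
 */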
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

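/*
 * The device exposes its registers through BAR 0; reserve it here
 * before the initialization segment is mapped.
 */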
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

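/*
 * Request one MSI-X vector per port per online CPU for completion EQs,
 * plus MLX5_EQ_VEC_COMP_BASE control vectors, clamped to the number of
 * EQs the device advertises through log_max_eq.
 */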
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int i;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);
	priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL);

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK	= CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				  MLX5_DEV_CAP_FLAG_DCT,
};

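/*
 * Convert a pkey table size in entries to the firmware encoding
 * (log2 of size / 128); unknown sizes fall back to the 128-entry code.
 */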
static u16 to_fw_pkey_sz(u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size);
		return 0;
	}
}

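/*
 * Query one HCA capability block from firmware and cache it in
 * dev->hca_caps_max[] or dev->hca_caps_cur[], depending on cap_mode.
 */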
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
		       enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto query_ex;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
	int err;

	memset(out, 0, sizeof(out));

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	if (err)
		return err;

	err = mlx5_cmd_status_to_err_v2(out);

	return err;
}

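/*
 * Read the maximum and current general capabilities, then write back an
 * adjusted copy: the pkey table is limited to 128 entries, log_max_qp is
 * taken from the active profile, the command-interface checksum is
 * disabled and the UAR page size is matched to the host page size.
 */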
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
	if (err)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}

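/* Report the host endianness to the device via the HOST_ENDIANNESS register. */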
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

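/*
 * ENABLE_HCA must be the first configuration command after the command
 * interface is up; DISABLE_HCA is its counterpart on teardown.
 */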
static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)];

	memset(in, 0, sizeof(in));
	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)];

	memset(in, 0, sizeof(in));
	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}

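/*
 * Negotiate the ISSI (interface step sequence ID) with firmware: query
 * the supported set and switch to ISSI 1 when it is offered, otherwise
 * stay on ISSI 0. Firmware that rejects the query with BAD_OP only
 * speaks ISSI 0, which is fine.
 */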
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
	u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
	u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
	int err;
	u32 sup_issi;

	memset(query_in, 0, sizeof(query_in));
	memset(query_out, 0, sizeof(query_out));

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
					 query_out, sizeof(query_out));
	if (err) {
		if (((struct mlx5_outbox_hdr *)query_out)->status ==
		    MLX5_CMD_STAT_BAD_OP_ERR) {
			pr_debug("Only ISSI 0 is supported\n");
			return 0;
		}

		printf("mlx5_core: ERR: ""failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		memset(set_in, 0, sizeof(set_in));
		memset(set_out, 0, sizeof(set_out));

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
						 set_out, sizeof(set_out));
		if (err) {
			printf("mlx5_core: ERR: ""failed to set ISSI=1\n");
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0)) {
		return 0;
	}

	return -ENOTSUPP;
}

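/* Resolve a completion vector index to its EQ number and IRQ number. */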
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == eq_ix) {
			int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE;

			snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME,
				 "%s-%d", name, eq_ix);
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}

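/*
 * Create one completion EQ per MSI-X completion vector and map it to
 * the corresponding interrupt; free_comp_eqs() above is the undo path.
 */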
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);

		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}

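/*
 * Map the blue flame area of BAR 0 write-combining; failure is not
 * fatal, the driver just falls back to plain doorbell writes.
 */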
static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}

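/*
 * Firmware keeps the "initializing" bit of the initialization segment
 * set while it boots; poll it with a timeout before issuing commands.
 */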
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	u64 end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}
	return err;
}

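/*
 * Bring the device up: PCI and DMA setup, command interface, firmware
 * handshake, boot/init pages, capabilities, MSI-X, EQs and UARs, and
 * finally the CQ/QP/SRQ/MR software tables. The error labels unwind in
 * strict reverse order of initialization.
 */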
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev->pdev = pdev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->numa_node = NUMA_NO_NODE;

	err = pci_enable_device(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg = ioremap(pci_resource_start(dev->pdev, 0),
			    sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}
	device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	err = mlx5_cmd_init(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
		goto err_unmap;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_core_enable_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n");
		goto err_pagealloc_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
		goto err_pagealloc_stop;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""query hca failed\n");
		goto err_stop_poll;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""query board id failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
		goto err_stop_poll;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""failed to initialize eq\n");
		goto disable_msix;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
		goto err_eq_cleanup;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	if (map_bf_area(dev))
		device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	mlx5_init_cq_table(dev);
	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	return 0;

err_stop_eqs:
	mlx5_stop_eqs(dev);
err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);
err_eq_cleanup:
	mlx5_eq_cleanup(dev);
disable_msix:
	mlx5_disable_msix(dev);
err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		return err;
	}
reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);
err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);
err_disable_hca:
	mlx5_core_disable_hca(dev);
err_pagealloc_cleanup:
	mlx5_pagealloc_cleanup(dev);
err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);
err_unmap:
	iounmap(dev->iseg);
err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	pci_disable_device(dev->pdev);
err_dbg:
	return err;
}

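/* Tear down everything mlx5_dev_init() set up, in reverse order. */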
static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	unmap_bf_area(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_eq_cleanup(dev);
	mlx5_disable_msix(dev);
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
		return;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	pci_disable_device(dev->pdev);
}

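/*
 * Glue between the core and protocol drivers (the Ethernet and
 * InfiniBand ULPs): every registered interface is offered every device,
 * and the per-pair context returned by intf->add() is kept on
 * priv->ctx_list so events and removal can reach it later.
 */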
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);

	dev_ctx->intf    = intf;
	dev_ctx->context = intf->add(dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

static int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
			    unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};

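/*
 * PCI probe/remove entry points. An out-of-range prof_sel is clamped to
 * the default profile before the device is initialized and registered
 * with the ULP interfaces.
 */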
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	priv = &dev->priv;

	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profiles)) {
		printf("mlx5_core: WARN: ""selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profiles[prof_sel];
	dev->event = mlx5_core_event;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	err = mlx5_dev_init(dev, pdev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_dev_init failed %d\n", err);
		goto clean_dev;
	}

	err = mlx5_register_device(dev);
	if (err) {
		device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_register_device failed %d\n", err);
		goto clean_dev_init;
	}

	return 0;

clean_dev_init:
	mlx5_dev_cleanup(dev);
clean_dev:
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_unregister_device(dev);
	mlx5_dev_cleanup(dev);
	kfree(dev);
}

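/*
 * PCI IDs handled by this driver: Connect-IB and ConnectX-4/4LX plus
 * their VFs, followed by a range of related follow-on device IDs.
 */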
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 4119) },
	{ PCI_VDEVICE(MELLANOX, 4120) },
	{ PCI_VDEVICE(MELLANOX, 4121) },
	{ PCI_VDEVICE(MELLANOX, 4122) },
	{ PCI_VDEVICE(MELLANOX, 4123) },
	{ PCI_VDEVICE(MELLANOX, 4124) },
	{ PCI_VDEVICE(MELLANOX, 4125) },
	{ PCI_VDEVICE(MELLANOX, 4126) },
	{ PCI_VDEVICE(MELLANOX, 4127) },
	{ PCI_VDEVICE(MELLANOX, 4128) },
	{ PCI_VDEVICE(MELLANOX, 4129) },
	{ PCI_VDEVICE(MELLANOX, 4130) },
	{ PCI_VDEVICE(MELLANOX, 4131) },
	{ PCI_VDEVICE(MELLANOX, 4132) },
	{ PCI_VDEVICE(MELLANOX, 4133) },
	{ PCI_VDEVICE(MELLANOX, 4134) },
	{ PCI_VDEVICE(MELLANOX, 4135) },
	{ PCI_VDEVICE(MELLANOX, 4136) },
	{ PCI_VDEVICE(MELLANOX, 4137) },
	{ PCI_VDEVICE(MELLANOX, 4138) },
	{ PCI_VDEVICE(MELLANOX, 4139) },
	{ PCI_VDEVICE(MELLANOX, 4140) },
	{ PCI_VDEVICE(MELLANOX, 4141) },
	{ PCI_VDEVICE(MELLANOX, 4142) },
	{ PCI_VDEVICE(MELLANOX, 4143) },
	{ PCI_VDEVICE(MELLANOX, 4144) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

static struct pci_driver mlx5_core_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_core_pci_table,
	.probe		= init_one,
	.remove		= remove_one
};

static int __init init(void)
{
	int err;

	mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
	if (!mlx5_core_wq) {
		err = -ENOMEM;
		goto err_debug;
	}
	mlx5_health_init();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_health;

	return 0;

err_health:
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
err_debug:
	return err;
}

static void __exit cleanup(void)
{
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
}

module_init(init);
module_exit(cleanup);