2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
31 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
41 #include "ecore_gtt_reg_addr.h"
43 #include "ecore_chain.h"
44 #include "ecore_status.h"
46 #include "ecore_rt_defs.h"
47 #include "ecore_init_ops.h"
48 #include "ecore_int.h"
49 #include "ecore_cxt.h"
50 #include "ecore_spq.h"
51 #include "ecore_init_fw_funcs.h"
52 #include "ecore_sp_commands.h"
53 #include "ecore_dev_api.h"
54 #include "ecore_l2_api.h"
55 #include "ecore_mcp.h"
56 #include "ecore_hw_defs.h"
57 #include "mcp_public.h"
58 #include "ecore_iro.h"
60 #include "ecore_dev_api.h"
61 #include "ecore_dbg_fw_funcs.h"
63 #include "qlnx_ioctl.h"
69 static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
/*
 * Character-device switch for the qlnx control node; only an ioctl
 * entry point is installed (qlnx_eioctl), used by management tools.
 */
72 static struct cdevsw qlnx_cdevsw = {
73 .d_version = D_VERSION,
74 .d_ioctl = qlnx_eioctl,
75 .d_name = "qlnxioctl",
/*
 * Create the per-adapter control character device and stash the
 * softc pointer in si_drv1 so qlnx_eioctl() can recover it.
 */
79 qlnx_make_cdev(qlnx_host_t *ha)
81 ha->ioctl_dev = make_dev(&qlnx_cdevsw,
89 if (ha->ioctl_dev == NULL)
92 ha->ioctl_dev->si_drv1 = ha;
/*
 * Destroy the control character device created by qlnx_make_cdev(),
 * if it was successfully created.
 */
98 qlnx_del_cdev(qlnx_host_t *ha)
100 if (ha->ioctl_dev != NULL)
101 destroy_dev(ha->ioctl_dev);
/*
 * Capture a GRC (register) dump for one HW function into the
 * preallocated ha->grcdump[hwfn_index] buffer.
 *
 * If a dump was already taken (cached dword count is non-zero) the
 * cached size is returned without re-dumping.  Otherwise a PTT window
 * is acquired and ecore_dbg_grc_dump() fills the buffer; on success
 * ha->grcdump_taken is set so later callers know a dump is cached.
 */
106 qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
109 struct ecore_hwfn *p_hwfn;
110 struct ecore_ptt *p_ptt;
112 if (ha->grcdump_dwords[hwfn_index]) {
113 /* the grcdump is already available */
114 *num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
118 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
120 p_hwfn = &ha->cdev.hwfns[hwfn_index];
121 p_ptt = ecore_ptt_acquire(p_hwfn);
124 QL_DPRINT1(ha, (ha->pci_dev, "%s : ecore_ptt_acquire failed\n",
129 if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
130 ha->grcdump[hwfn_index],
/* buffer size is kept in bytes; >> 2 converts it to the dword count ecore expects */
131 (ha->grcdump_size[hwfn_index] >> 2),
132 num_dumped_dwords)) == DBG_STATUS_OK) {
134 ha->grcdump_taken = 1;
136 QL_DPRINT1(ha, (ha->pci_dev,
137 "%s : ecore_dbg_grc_dump failed [%d, 0x%x]\n",
138 __func__, hwfn_index, rval));
140 ecore_ptt_release(p_hwfn, p_ptt);
/*
 * Report, to the ioctl caller, the per-hwfn GRC dump buffer sizes
 * (in bytes) so userland can allocate adequately sized buffers.
 */
146 qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
150 grcdump->pci_func = ha->pci_func;
152 for (i = 0; i < ha->cdev.num_hwfns; i++)
153 grcdump->grcdump_size[i] = ha->grcdump_size[i];
/*
 * Take (or reuse a cached) GRC dump for every HW function and copy it
 * out to the user-supplied buffers in the qlnx_grcdump_t request.
 * The user buffer pointers and sizes are validated per hwfn before
 * any work is done.  After a successful copyout the cached dump state
 * is cleared so the next request captures a fresh dump.
 */
159 qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
165 grcdump->pci_func = ha->pci_func;
167 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* reject if no kernel buffer, no user buffer, or user buffer too small */
169 if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
170 (grcdump->grcdump_size[i] < ha->grcdump_size[i]))
173 rval = qlnx_grc_dump(ha, &dwords, i);
178 grcdump->grcdump_dwords[i] = dwords;
180 QL_DPRINT1(ha, (ha->pci_dev, "%s: grcdump_dwords[%d] = 0x%x\n",
181 __func__, i, dwords));
183 rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
184 ha->grcdump_size[i]);
/* invalidate the cached dump for this hwfn once it has been delivered */
189 ha->grcdump_dwords[i] = 0;
192 ha->grcdump_taken = 0;
/*
 * Run the firmware idle-check for one HW function into the
 * preallocated ha->idle_chk[hwfn_index] buffer.
 *
 * Mirrors qlnx_grc_dump(): returns the cached dword count if an idle
 * check was already taken; otherwise acquires a PTT window, runs
 * ecore_dbg_idle_chk_dump(), and marks ha->idle_chk_taken on success.
 */
198 qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
201 struct ecore_hwfn *p_hwfn;
202 struct ecore_ptt *p_ptt;
204 if (ha->idle_chk_dwords[hwfn_index]) {
205 /* the idle check is already available */
206 *num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
210 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
212 p_hwfn = &ha->cdev.hwfns[hwfn_index];
213 p_ptt = ecore_ptt_acquire(p_hwfn);
216 QL_DPRINT1(ha, (ha->pci_dev,
217 "%s : ecore_ptt_acquire failed\n", __func__));
221 if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
222 ha->idle_chk[hwfn_index],
/* byte size converted to dwords for the ecore API */
223 (ha->idle_chk_size[hwfn_index] >> 2),
224 num_dumped_dwords)) == DBG_STATUS_OK) {
226 ha->idle_chk_taken = 1;
228 QL_DPRINT1(ha, (ha->pci_dev,
229 "%s : ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
230 __func__, hwfn_index, rval));
232 ecore_ptt_release(p_hwfn, p_ptt);
/*
 * Report the per-hwfn idle-check buffer sizes (in bytes) to the
 * ioctl caller so userland can size its buffers.
 */
238 qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
242 idle_chk->pci_func = ha->pci_func;
244 for (i = 0; i < ha->cdev.num_hwfns; i++)
245 idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];
/*
 * Take (or reuse a cached) idle-check dump for every HW function and
 * copy it out to the user-supplied buffers.  Per-hwfn buffer pointers
 * and sizes are validated first; cached state is cleared after a
 * successful copyout so the next request captures fresh data.
 */
251 qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
257 idle_chk->pci_func = ha->pci_func;
259 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* reject if no kernel buffer, no user buffer, or user buffer too small */
261 if ((ha->idle_chk[i] == NULL) ||
262 (idle_chk->idle_chk[i] == NULL) ||
263 (idle_chk->idle_chk_size[i] <
264 ha->idle_chk_size[i]))
267 rval = qlnx_idle_chk(ha, &dwords, i);
272 idle_chk->idle_chk_dwords[i] = dwords;
274 QL_DPRINT1(ha, (ha->pci_dev, "%s: idle_chk_dwords[%d] = 0x%x\n",
275 __func__, i, dwords));
277 rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
278 ha->idle_chk_size[i]);
/* invalidate the cached idle check for this hwfn once delivered */
283 ha->idle_chk_dwords[i] = 0;
285 ha->idle_chk_taken = 0;
/*
 * Query the ecore debug layer for the dump-buffer size required by a
 * given trace command (MCP trace, reg/IGU FIFO, protection override,
 * or FW asserts) on one HW function.
 *
 * Returns the required size in BYTES (the ecore calls report dwords,
 * converted at the bottom); returns via the visible zero-initialized
 * num_dwords when the command fails or the PTT cannot be acquired.
 */
291 qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
294 struct ecore_hwfn *p_hwfn;
295 struct ecore_ptt *p_ptt;
296 uint32_t num_dwords = 0;
298 p_hwfn = &ha->cdev.hwfns[hwfn_index];
299 p_ptt = ecore_ptt_acquire(p_hwfn);
302 QL_DPRINT1(ha, (ha->pci_dev,
303 "%s: ecore_ptt_acquire [%d, 0x%x]failed\n",
304 __func__, hwfn_index, cmd));
/* dispatch on cmd: each case asks ecore for its specific buffer size */
311 rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
316 rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
321 rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
325 case QLNX_PROTECTION_OVERRIDE:
326 rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
330 case QLNX_FW_ASSERTS:
331 rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
336 if (rval != DBG_STATUS_OK) {
337 QL_DPRINT1(ha, (ha->pci_dev, "%s : cmd = 0x%x failed [0x%x]\n",
338 __func__, cmd, rval));
342 ecore_ptt_release(p_hwfn, p_ptt);
/* convert the dword count to bytes for the caller */
344 return ((num_dwords * sizeof (uint32_t)));
/*
 * Fill in the per-hwfn buffer size (bytes) needed for the trace
 * command selected in trace->cmd, for each HW function.
 */
348 qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
352 trace->pci_func = ha->pci_func;
354 for (i = 0; i < ha->cdev.num_hwfns; i++) {
355 trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
/*
 * Execute the trace command selected by trace->cmd for one HW
 * function: allocate a kernel staging buffer of the size userland was
 * told (trace->size[hwfn_index]), run the matching ecore debug dump
 * into it, record the dumped dword count, and copy the result out to
 * the user buffer.
 */
362 qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
365 struct ecore_hwfn *p_hwfn;
366 struct ecore_ptt *p_ptt;
367 uint32_t num_dwords = 0;
370 buffer = qlnx_zalloc(trace->size[hwfn_index]);
371 if (buffer == NULL) {
372 QL_DPRINT1(ha, (ha->pci_dev,
373 "%s: qlnx_zalloc [%d, 0x%x]failed\n",
374 __func__, hwfn_index, trace->cmd));
377 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
379 p_hwfn = &ha->cdev.hwfns[hwfn_index];
380 p_ptt = ecore_ptt_acquire(p_hwfn);
383 QL_DPRINT1(ha, (ha->pci_dev,
384 "%s: ecore_ptt_acquire [%d, 0x%x]failed\n",
385 __func__, hwfn_index, trace->cmd));
/* dispatch on cmd; each ecore call takes the buffer size in dwords (bytes >> 2) */
389 switch (trace->cmd) {
392 rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
393 buffer, (trace->size[hwfn_index] >> 2),
398 rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
399 buffer, (trace->size[hwfn_index] >> 2),
404 rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
405 buffer, (trace->size[hwfn_index] >> 2),
409 case QLNX_PROTECTION_OVERRIDE:
410 rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
411 buffer, (trace->size[hwfn_index] >> 2),
415 case QLNX_FW_ASSERTS:
416 rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
417 buffer, (trace->size[hwfn_index] >> 2),
422 if (rval != DBG_STATUS_OK) {
423 QL_DPRINT1(ha, (ha->pci_dev, "%s : cmd = 0x%x failed [0x%x]\n",
424 __func__, trace->cmd, rval));
428 ecore_ptt_release(p_hwfn, p_ptt);
430 trace->dwords[hwfn_index] = num_dwords;
/* deliver the staged dump to the user-supplied buffer */
433 rval = copyout(buffer, trace->buffer[hwfn_index],
/*
 * Service the register read/write ioctl: validate the hwfn index,
 * then perform a direct 32-bit register read or write on the selected
 * HW function as requested by reg_rd_wr->cmd / access_type.
 */
441 qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
444 struct ecore_hwfn *p_hwfn;
/* bounds-check the caller-supplied hwfn index before dereferencing */
446 if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
450 p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
452 switch (reg_rd_wr->cmd) {
454 case QLNX_REG_READ_CMD:
455 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
456 reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
461 case QLNX_REG_WRITE_CMD:
462 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
463 qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
/*
 * Service the PCI config-space read/write ioctl using the standard
 * pci_read_config()/pci_write_config() bus accessors; the caller
 * supplies register offset, width, and (for writes) the value.
 */
477 qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
481 switch (pci_cfg_rd_wr->cmd) {
483 case QLNX_PCICFG_READ:
484 pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
486 pci_cfg_rd_wr->width);
489 case QLNX_PCICFG_WRITE:
490 pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
491 pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
/*
 * Format the adapter's permanent (primary) MAC address into the
 * caller's buffer as the usual colon-separated hex string.
 */
503 qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
505 bzero(mac_addr->addr, sizeof(mac_addr->addr));
506 snprintf(mac_addr->addr, sizeof(mac_addr->addr),
507 "%02x:%02x:%02x:%02x:%02x:%02x",
508 ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
509 ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);
/*
 * Concatenate the GRC dumps of all HW functions into the single user
 * buffer regs->reg_buf, accumulating the total length in
 * regs->reg_buf_len, then clear the cached dump state.
 */
515 qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
522 regs->reg_buf_len = 0;
523 outb = regs->reg_buf;
525 for (i = 0; i < ha->cdev.num_hwfns; i++) {
527 rval = qlnx_grc_dump(ha, &dwords, i);
532 regs->reg_buf_len += (dwords << 2);
534 rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);
539 ha->grcdump_dwords[i] = 0;
/*
 * NOTE(review): outb advances by the CUMULATIVE reg_buf_len, not by
 * this hwfn's grcdump_size[i]; for multi-hwfn devices this looks like
 * it could over-advance after the first iteration — confirm intent.
 */
540 outb += regs->reg_buf_len;
543 ha->grcdump_taken = 0;
/*
 * Fill in the driver-information ioctl structure: driver name and
 * version, MFW/storm firmware versions, flash (eeprom) size, total
 * register-dump length across all HW functions, and the PCI
 * bus/slot/function location string.
 */
549 qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
552 extern char qlnx_name_str[];
553 extern char qlnx_ver_str[];
555 bzero(drv_info, sizeof(qlnx_drvinfo_t));
557 snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
559 snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
561 snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
563 snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
564 "%s", ha->stormfw_ver);
566 drv_info->eeprom_dump_len = ha->flash_size;
/* total GRC dump length is the sum over all hwfns */
568 for (i = 0; i < ha->cdev.num_hwfns; i++) {
569 drv_info->reg_dump_len += ha->grcdump_size[i];
572 snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
573 "%d:%d:%d", pci_get_bus(ha->pci_dev),
574 pci_get_slot(ha->pci_dev), ha->pci_func);
/*
 * Report link settings (supported/advertised capabilities, speed,
 * duplex, port, autoneg) to the ioctl caller, derived from hwfn 0's
 * current link state via qlnx_fill_link().
 */
580 qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
582 struct ecore_hwfn *p_hwfn;
583 struct qlnx_link_output if_link;
585 p_hwfn = &ha->cdev.hwfns[0];
587 qlnx_fill_link(p_hwfn, &if_link);
589 dev_info->supported = if_link.supported_caps;
590 dev_info->advertising = if_link.advertised_caps;
591 dev_info->speed = if_link.speed;
592 dev_info->duplex = if_link.duplex;
/* low bit of the PCI function number selects the physical port */
593 dev_info->port = ha->pci_func & 0x1;
594 dev_info->autoneg = if_link.autoneg;
/*
 * Write NVRAM on behalf of the ioctl caller: validate the request,
 * copy the user data into a zeroed kernel buffer, and hand it to
 * ecore_mcp_nvm_write() with the given MCP command (write-nvram or
 * put-file-data) at the requested flash offset.  The staging buffer
 * is freed before returning.
 */
600 qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
605 if ((nvram->data == NULL) || (nvram->data_len == 0))
608 buf = qlnx_zalloc(nvram->data_len);
610 ret = copyin(nvram->data, buf, nvram->data_len);
613 (ha->pci_dev, "%s: issue cmd = 0x%x data = %p "
614 " data_len = 0x%x ret = 0x%x exit\n", __func__,
615 cmd, nvram->data, nvram->data_len, ret));
618 ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
619 nvram->offset, buf, nvram->data_len);
623 (ha->pci_dev, "%s: cmd = 0x%x data = %p "
624 " data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
625 __func__, cmd, nvram->data, nvram->data_len,
626 ha->cdev.mcp_nvm_resp, ret));
628 free(buf, M_QLNXBUF);
/*
 * Read NVRAM on behalf of the ioctl caller: validate the request,
 * read from the given flash offset into a kernel staging buffer via
 * ecore_mcp_nvm_read(), copy the data out to userland, and free the
 * staging buffer.
 */
634 qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
639 if ((nvram->data == NULL) || (nvram->data_len == 0))
642 buf = qlnx_zalloc(nvram->data_len);
644 ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
647 QL_DPRINT9(ha, (ha->pci_dev, "%s: data = %p data_len = 0x%x "
648 " resp = 0x%x ret = 0x%x exit\n", __func__,
649 nvram->data, nvram->data_len,
650 ha->cdev.mcp_nvm_resp, ret));
653 ret = copyout(buf, nvram->data, nvram->data_len);
656 free(buf, M_QLNXBUF);
/*
 * Fetch the management firmware's response to the last NVM command
 * (ecore_mcp_nvm_resp) into a kernel buffer and copy it out to the
 * caller-supplied buffer.  The staging buffer is freed on exit.
 */
662 qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
667 if ((nvram->data == NULL) || (nvram->data_len == 0))
670 buf = qlnx_zalloc(nvram->data_len);
673 ret = ecore_mcp_nvm_resp(&ha->cdev, buf);
675 QL_DPRINT9(ha, (ha->pci_dev, "%s: data = %p data_len = 0x%x "
676 " resp = 0x%x ret = 0x%x exit\n", __func__,
677 nvram->data, nvram->data_len,
678 ha->cdev.mcp_nvm_resp, ret));
681 ret = copyout(buf, nvram->data, nvram->data_len);
684 free(buf, M_QLNXBUF);
/*
 * Dispatch the NVRAM ioctl to the appropriate operation based on
 * nvram->cmd: buffered write/read paths go through the
 * qlnx_{write,read}_nvram() helpers, while the simpler commands
 * (secure mode, delete file, put-file-begin) call ecore directly
 * with the offset field as their argument.
 */
690 qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
694 switch (nvram->cmd) {
696 case QLNX_NVRAM_CMD_WRITE_NVRAM:
697 ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
700 case QLNX_NVRAM_CMD_PUT_FILE_DATA:
701 ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
704 case QLNX_NVRAM_CMD_READ_NVRAM:
705 ret = qlnx_read_nvram(ha, nvram);
708 case QLNX_NVRAM_CMD_SET_SECURE_MODE:
709 ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);
711 QL_DPRINT9(ha, (ha->pci_dev,
712 "%s: QLNX_NVRAM_CMD_SET_SECURE_MODE "
713 " resp = 0x%x ret = 0x%x exit\n", __func__,
714 ha->cdev.mcp_nvm_resp, ret));
717 case QLNX_NVRAM_CMD_DEL_FILE:
718 ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);
720 QL_DPRINT9(ha, (ha->pci_dev,
721 "%s: QLNX_NVRAM_CMD_DEL_FILE "
722 " resp = 0x%x ret = 0x%x exit\n", __func__,
723 ha->cdev.mcp_nvm_resp, ret));
726 case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
727 ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);
729 QL_DPRINT9(ha, (ha->pci_dev,
730 "%s: QLNX_NVRAM_CMD_PUT_FILE_BEGIN "
731 " resp = 0x%x ret = 0x%x exit\n", __func__,
732 ha->cdev.mcp_nvm_resp, ret));
735 case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
736 ret = qlnx_get_nvram_resp(ha, nvram);
/*
 * Copy the accumulated per-hwfn storm statistics samples out to the
 * user-supplied buffers and, once a full sample window has been
 * delivered, zero the kernel-side sample store and reset the sample
 * index so collection starts over.
 */
748 qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
753 int stats_copied = 0;
755 s_stats->num_hwfns = ha->cdev.num_hwfns;
757 // if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
760 s_stats->num_samples = ha->storm_stats_index;
762 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* each hwfn's samples live in a fixed-size slice of ha->storm_stats[] */
764 index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
766 if (s_stats->buffer[i]) {
768 ret = copyout(&ha->storm_stats[index],
770 QLNX_STORM_STATS_BYTES_PER_HWFN);
772 printf("%s [%d]: failed\n", __func__, i);
775 if (s_stats->num_samples ==
776 QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
/*
 * NOTE(review): bzero base is &ha->storm_stats[i], while the copyout
 * above used &ha->storm_stats[index] — looks like it should be
 * 'index' here too; confirm before relying on this clearing.
 */
778 bzero((void *)&ha->storm_stats[i],
779 QLNX_STORM_STATS_BYTES_PER_HWFN);
787 ha->storm_stats_index = 0;
794 qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
803 if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
810 case QLNX_GRC_DUMP_SIZE:
811 qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
815 rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
818 case QLNX_IDLE_CHK_SIZE:
819 qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
823 rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
827 rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
830 case QLNX_DEV_SETTING:
831 rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
835 rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
839 rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
843 rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
846 case QLNX_RD_WR_PCICFG:
847 rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
851 qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
854 case QLNX_STORM_STATS:
855 qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
858 case QLNX_TRACE_SIZE:
859 qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
863 trace = (qlnx_trace_t *)data;
865 for (i = 0; i < ha->cdev.num_hwfns; i++) {
867 if (trace->size[i] && trace->cmd && trace->buffer[i])
868 rval = qlnx_get_trace(ha, i, trace);