2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
31 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
41 #include "ecore_gtt_reg_addr.h"
43 #include "ecore_chain.h"
44 #include "ecore_status.h"
46 #include "ecore_rt_defs.h"
47 #include "ecore_init_ops.h"
48 #include "ecore_int.h"
49 #include "ecore_cxt.h"
50 #include "ecore_spq.h"
51 #include "ecore_init_fw_funcs.h"
52 #include "ecore_sp_commands.h"
53 #include "ecore_dev_api.h"
54 #include "ecore_l2_api.h"
55 #include "ecore_mcp.h"
56 #include "ecore_hw_defs.h"
57 #include "mcp_public.h"
58 #include "ecore_iro.h"
60 #include "ecore_dev_api.h"
61 #include "ecore_dbg_fw_funcs.h"
63 #include "qlnx_ioctl.h"
69 static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
/*
 * Character-device switch for the qlnx control node: only ioctl is
 * serviced, via qlnx_eioctl().
 * NOTE(review): the closing brace of this initializer is not visible
 * in this view of the file.
 */
72 static struct cdevsw qlnx_cdevsw = {
73 .d_version = D_VERSION,
74 .d_ioctl = qlnx_eioctl,
75 .d_name = "qlnxioctl",
/*
 * qlnx_make_cdev
 *	Create the control /dev node for this adapter instance and stash
 *	the softc in si_drv1 so qlnx_eioctl() can recover it from the cdev.
 *	NOTE(review): the make_dev() argument list, failure return and
 *	success return are elided in this view of the file — confirm
 *	against the full source.
 */
79 qlnx_make_cdev(qlnx_host_t *ha)
81 ha->ioctl_dev = make_dev(&qlnx_cdevsw,
89 if (ha->ioctl_dev == NULL)
/* back-pointer used by the ioctl handler to find the softc */
92 ha->ioctl_dev->si_drv1 = ha;
/*
 * qlnx_del_cdev
 *	Destroy the control /dev node created by qlnx_make_cdev(), if one
 *	was created.
 */
98 qlnx_del_cdev(qlnx_host_t *ha)
100 if (ha->ioctl_dev != NULL)
101 destroy_dev(ha->ioctl_dev);
/*
 * qlnx_grc_dump
 *	Capture a GRC register dump for hw function 'hwfn_index' into the
 *	pre-allocated ha->grcdump[hwfn_index] buffer and report the dump
 *	length (in dwords) through *num_dumped_dwords.  If a dump is
 *	already cached for this hwfn, just return its cached length.
 *	NOTE(review): the return statements, the 'rval' declaration and
 *	the p_ptt NULL-check body are elided in this view of the file.
 */
106 qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
109 struct ecore_hwfn *p_hwfn;
110 struct ecore_ptt *p_ptt;
/* Fast path: a dump was already taken for this hwfn. */
112 if (ha->grcdump_dwords[hwfn_index]) {
113 /* the grcdump is already available */
114 *num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
/* Register the driver's debug-app version with the ecore debug engine. */
118 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
120 p_hwfn = &ha->cdev.hwfns[hwfn_index];
/* A PTT window is required for register access during the dump. */
121 p_ptt = ecore_ptt_acquire(p_hwfn);
124 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
/* Dump into the pre-allocated buffer; size argument is in dwords. */
128 if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
129 ha->grcdump[hwfn_index],
130 (ha->grcdump_size[hwfn_index] >> 2),
131 num_dumped_dwords)) == DBG_STATUS_OK) {
133 ha->grcdump_taken = 1;
135 QL_DPRINT1(ha,"ecore_dbg_grc_dump failed [%d, 0x%x]\n",
/* Always release the PTT window acquired above. */
138 ecore_ptt_release(p_hwfn, p_ptt);
/*
 * qlnx_get_grc_dump_size
 *	Report the per-hwfn GRC dump buffer sizes (and the PCI function)
 *	back to the caller's qlnx_grcdump_t so user space can size its
 *	buffers before requesting the actual dump.
 */
144 qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
148 grcdump->pci_func = ha->pci_func;
150 for (i = 0; i < ha->cdev.num_hwfns; i++)
151 grcdump->grcdump_size[i] = ha->grcdump_size[i];
/*
 * qlnx_get_grc_dump
 *	For each hw function: take (or reuse) a GRC dump and copy it out
 *	to the user-supplied buffer in grcdump->grcdump[i].  Cached dword
 *	counts are cleared after a successful copyout so the next request
 *	captures a fresh dump.
 *	NOTE(review): error-return paths after qlnx_grc_dump()/copyout()
 *	and the 'rval'/'dwords' declarations are elided in this view.
 */
157 qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
163 grcdump->pci_func = ha->pci_func;
165 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* Skip hwfns with no kernel buffer, no user buffer, or a too-small
 * user buffer. */
167 if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
168 (grcdump->grcdump_size[i] < ha->grcdump_size[i]))
171 rval = qlnx_grc_dump(ha, &dwords, i);
176 grcdump->grcdump_dwords[i] = dwords;
178 QL_DPRINT1(ha,"grcdump_dwords[%d] = 0x%x\n", i, dwords);
/* Copy the full pre-allocated buffer size, not just 'dwords'. */
180 rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
181 ha->grcdump_size[i]);
/* Invalidate the cache so a subsequent ioctl re-captures. */
186 ha->grcdump_dwords[i] = 0;
189 ha->grcdump_taken = 0;
/*
 * qlnx_idle_chk
 *	Capture a firmware idle-check dump for hw function 'hwfn_index'
 *	into the pre-allocated ha->idle_chk[hwfn_index] buffer, reporting
 *	the dump length (in dwords) via *num_dumped_dwords.  Mirrors the
 *	structure of qlnx_grc_dump() including the cached fast path.
 *	NOTE(review): the return statements, the 'rval' declaration and
 *	the p_ptt NULL-check body are elided in this view of the file.
 */
195 qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
198 struct ecore_hwfn *p_hwfn;
199 struct ecore_ptt *p_ptt;
/* Fast path: an idle-check dump was already taken for this hwfn. */
201 if (ha->idle_chk_dwords[hwfn_index]) {
202 /* the idle check is already available */
203 *num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
207 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
209 p_hwfn = &ha->cdev.hwfns[hwfn_index];
210 p_ptt = ecore_ptt_acquire(p_hwfn);
213 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
/* Buffer size argument is in dwords (bytes >> 2). */
217 if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
218 ha->idle_chk[hwfn_index],
219 (ha->idle_chk_size[hwfn_index] >> 2),
220 num_dumped_dwords)) == DBG_STATUS_OK) {
222 ha->idle_chk_taken = 1;
224 QL_DPRINT1(ha,"ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
227 ecore_ptt_release(p_hwfn, p_ptt);
/*
 * qlnx_get_idle_chk_size
 *	Report the per-hwfn idle-check buffer sizes (and the PCI function)
 *	so user space can size its buffers before requesting the dump.
 */
233 qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
237 idle_chk->pci_func = ha->pci_func;
239 for (i = 0; i < ha->cdev.num_hwfns; i++)
240 idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];
/*
 * qlnx_get_idle_chk
 *	For each hw function: take (or reuse) an idle-check dump and copy
 *	it out to the user buffer in idle_chk->idle_chk[i].  Cached dword
 *	counts are cleared afterwards so the next ioctl re-captures.
 *	Mirrors qlnx_get_grc_dump().
 *	NOTE(review): error-return paths and the 'rval'/'dwords'
 *	declarations are elided in this view of the file.
 */
246 qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
252 idle_chk->pci_func = ha->pci_func;
254 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* Skip hwfns lacking a kernel buffer, user buffer, or space. */
256 if ((ha->idle_chk[i] == NULL) ||
257 (idle_chk->idle_chk[i] == NULL) ||
258 (idle_chk->idle_chk_size[i] <
259 ha->idle_chk_size[i]))
262 rval = qlnx_idle_chk(ha, &dwords, i);
267 idle_chk->idle_chk_dwords[i] = dwords;
269 QL_DPRINT1(ha,"idle_chk_dwords[%d] = 0x%x\n", i, dwords);
271 rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
272 ha->idle_chk_size[i]);
/* Invalidate the cached dump for this hwfn. */
277 ha->idle_chk_dwords[i] = 0;
279 ha->idle_chk_taken = 0;
/*
 * qlnx_get_trace_cmd_size
 *	Query the ecore debug engine for the buffer size (returned in
 *	BYTES) needed to hold the debug data selected by 'cmd' (MCP trace,
 *	reg FIFO, IGU FIFO, protection override, or FW asserts) for the
 *	given hw function.
 *	NOTE(review): the 'rval' declaration, the switch statement header,
 *	the early case labels, the trailing size arguments (&num_dwords)
 *	and the error-path return are elided in this view of the file.
 */
285 qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
288 struct ecore_hwfn *p_hwfn;
289 struct ecore_ptt *p_ptt;
290 uint32_t num_dwords = 0;
292 p_hwfn = &ha->cdev.hwfns[hwfn_index];
293 p_ptt = ecore_ptt_acquire(p_hwfn);
296 QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
/* Each case asks the debug engine for the required dword count. */
304 rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
309 rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
314 rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
318 case QLNX_PROTECTION_OVERRIDE:
319 rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
323 case QLNX_FW_ASSERTS:
324 rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
329 if (rval != DBG_STATUS_OK) {
330 QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", cmd, rval);
334 ecore_ptt_release(p_hwfn, p_ptt);
/* Convert the dword count to bytes for the caller. */
336 return ((num_dwords * sizeof (uint32_t)));
/*
 * qlnx_get_trace_size
 *	Fill trace->size[i] with the byte size required for the requested
 *	trace command on each hw function, so user space can allocate
 *	before issuing the actual trace ioctl.
 */
340 qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
344 trace->pci_func = ha->pci_func;
346 for (i = 0; i < ha->cdev.num_hwfns; i++) {
347 trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
/*
 * qlnx_get_trace
 *	Capture the debug data selected by trace->cmd for one hw function
 *	into a temporary kernel buffer, then copy it out to the user
 *	buffer trace->buffer[hwfn_index].  The captured dword count is
 *	returned via trace->dwords[hwfn_index].
 *	NOTE(review): the 'rval'/'buffer' declarations, switch case labels
 *	for the first three commands, the &num_dwords trailing arguments,
 *	error-path returns, copyout length argument and the buffer free
 *	are elided in this view of the file.
 */
354 qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
357 struct ecore_hwfn *p_hwfn;
358 struct ecore_ptt *p_ptt;
359 uint32_t num_dwords = 0;
/* Temporary kernel staging buffer sized by the earlier size query. */
362 buffer = qlnx_zalloc(trace->size[hwfn_index]);
363 if (buffer == NULL) {
364 QL_DPRINT1(ha,"qlnx_zalloc [%d, 0x%x]failed\n",
365 hwfn_index, trace->cmd);
368 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
370 p_hwfn = &ha->cdev.hwfns[hwfn_index];
371 p_ptt = ecore_ptt_acquire(p_hwfn);
374 QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
375 hwfn_index, trace->cmd);
/* Dispatch on the requested debug-data type; sizes are in dwords. */
379 switch (trace->cmd) {
382 rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
383 buffer, (trace->size[hwfn_index] >> 2),
388 rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
389 buffer, (trace->size[hwfn_index] >> 2),
394 rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
395 buffer, (trace->size[hwfn_index] >> 2),
399 case QLNX_PROTECTION_OVERRIDE:
400 rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
401 buffer, (trace->size[hwfn_index] >> 2),
405 case QLNX_FW_ASSERTS:
406 rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
407 buffer, (trace->size[hwfn_index] >> 2),
412 if (rval != DBG_STATUS_OK) {
413 QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", trace->cmd, rval);
417 ecore_ptt_release(p_hwfn, p_ptt);
419 trace->dwords[hwfn_index] = num_dwords;
/* Hand the captured data back to user space. */
422 rval = copyout(buffer, trace->buffer[hwfn_index],
/*
 * qlnx_reg_rd_wr
 *	Perform a direct device register read or write on behalf of user
 *	space, selected by reg_rd_wr->cmd, against the hw function chosen
 *	by reg_rd_wr->hwfn_index (bounds-checked first).
 *	NOTE(review): the error return for the bounds check, non-DIRECT
 *	access handling, default case and final return are elided in this
 *	view of the file.
 */
430 qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
433 struct ecore_hwfn *p_hwfn;
/* Reject out-of-range hwfn indices from user space. */
435 if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
439 p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
441 switch (reg_rd_wr->cmd) {
443 case QLNX_REG_READ_CMD:
444 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
445 reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
450 case QLNX_REG_WRITE_CMD:
451 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
452 qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
/*
 * qlnx_rd_wr_pci_config
 *	Read or write a PCI configuration-space register on behalf of
 *	user space; register offset, value and access width come from the
 *	qlnx_pcicfg_rd_wr_t request.
 *	NOTE(review): the default case and the function's return are
 *	elided in this view of the file.
 */
466 qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
470 switch (pci_cfg_rd_wr->cmd) {
472 case QLNX_PCICFG_READ:
473 pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
475 pci_cfg_rd_wr->width);
478 case QLNX_PCICFG_WRITE:
479 pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
480 pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
/*
 * qlnx_mac_addr
 *	Format the adapter's permanent (primary) MAC address as a
 *	colon-separated hex string into mac_addr->addr for user space.
 */
492 qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
494 bzero(mac_addr->addr, sizeof(mac_addr->addr));
495 snprintf(mac_addr->addr, sizeof(mac_addr->addr),
496 "%02x:%02x:%02x:%02x:%02x:%02x",
497 ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
498 ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);
/*
 * qlnx_get_regs
 *	Concatenate the GRC dumps of all hw functions into the single
 *	user buffer regs->reg_buf, reporting the total length via
 *	regs->reg_buf_len.
 *	NOTE(review): 'outb += regs->reg_buf_len' advances the output
 *	pointer by the *cumulative* length accumulated so far rather than
 *	by this hwfn's dump size — for the second hwfn this looks like it
 *	over-advances (expected ha->grcdump_size[i]); verify against the
 *	full source before relying on multi-hwfn output.
 *	NOTE(review): error-return paths and the 'rval'/'dwords'/'outb'
 *	declarations are elided in this view of the file.
 */
504 qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
511 regs->reg_buf_len = 0;
512 outb = regs->reg_buf;
514 for (i = 0; i < ha->cdev.num_hwfns; i++) {
516 rval = qlnx_grc_dump(ha, &dwords, i);
/* Accumulate total output length in bytes (dwords * 4). */
521 regs->reg_buf_len += (dwords << 2);
523 rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);
/* Invalidate the cached dump for this hwfn. */
528 ha->grcdump_dwords[i] = 0;
529 outb += regs->reg_buf_len;
532 ha->grcdump_taken = 0;
/*
 * qlnx_drv_info
 *	Populate a qlnx_drvinfo_t with driver name/version, management
 *	and storm firmware versions, flash (EEPROM) size, aggregate GRC
 *	dump size across hwfns, and the PCI bus/slot/function string.
 *	NOTE(review): the format arguments for the first two snprintf
 *	calls (qlnx_name_str / qlnx_ver_str) and the mfw version source
 *	are elided in this view of the file, as is the return.
 */
538 qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
541 extern char qlnx_name_str[];
542 extern char qlnx_ver_str[];
544 bzero(drv_info, sizeof(qlnx_drvinfo_t));
546 snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
548 snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
550 snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
552 snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
553 "%s", ha->stormfw_ver);
555 drv_info->eeprom_dump_len = ha->flash_size;
/* Total register-dump length is the sum over all hw functions. */
557 for (i = 0; i < ha->cdev.num_hwfns; i++) {
558 drv_info->reg_dump_len += ha->grcdump_size[i];
561 snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
562 "%d:%d:%d", pci_get_bus(ha->pci_dev),
563 pci_get_slot(ha->pci_dev), ha->pci_func);
/*
 * qlnx_dev_settings
 *	Report link settings (supported/advertised capabilities, speed,
 *	duplex, port, autoneg) to user space, taken from hw function 0's
 *	current link state via qlnx_fill_link().
 */
569 qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
571 struct ecore_hwfn *p_hwfn;
572 struct qlnx_link_output if_link;
574 p_hwfn = &ha->cdev.hwfns[0];
576 qlnx_fill_link(p_hwfn, &if_link);
578 dev_info->supported = if_link.supported_caps;
579 dev_info->advertising = if_link.advertised_caps;
580 dev_info->speed = if_link.speed;
581 dev_info->duplex = if_link.duplex;
/* Port number is derived from the low bit of the PCI function. */
582 dev_info->port = ha->pci_func & 0x1;
583 dev_info->autoneg = if_link.autoneg;
/*
 * qlnx_write_nvram
 *	Copy user data into a kernel buffer and write it to NVRAM at
 *	nvram->offset via the management firmware, using the given MCP
 *	NVM command (ECORE_NVM_WRITE_NVRAM or ECORE_PUT_FILE_DATA).
 *	Rejects a NULL data pointer or zero length up front.
 *	NOTE(review): the qlnx_zalloc() NULL check, error returns after
 *	copyin, and the final return are elided in this view of the file.
 */
589 qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
/* Validate the user request before allocating. */
594 if ((nvram->data == NULL) || (nvram->data_len == 0))
597 buf = qlnx_zalloc(nvram->data_len);
599 ret = copyin(nvram->data, buf, nvram->data_len);
601 QL_DPRINT9(ha, "issue cmd = 0x%x data = %p \
602 data_len = 0x%x ret = 0x%x exit\n",
603 cmd, nvram->data, nvram->data_len, ret);
606 ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
607 nvram->offset, buf, nvram->data_len);
610 QL_DPRINT9(ha, "cmd = 0x%x data = %p \
611 data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
612 cmd, nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
/* Release the staging buffer allocated above. */
614 free(buf, M_QLNXBUF);
/*
 * qlnx_read_nvram
 *	Read nvram->data_len bytes from NVRAM at nvram->offset into a
 *	kernel staging buffer, then copy the result out to the user's
 *	nvram->data pointer.  Rejects NULL data or zero length up front.
 *	NOTE(review): the qlnx_zalloc() NULL check, error returns and the
 *	final return are elided in this view of the file.
 */
620 qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
625 if ((nvram->data == NULL) || (nvram->data_len == 0))
628 buf = qlnx_zalloc(nvram->data_len);
630 ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
633 QL_DPRINT9(ha, " data = %p data_len = 0x%x \
634 resp = 0x%x ret = 0x%x exit\n",
635 nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
638 ret = copyout(buf, nvram->data, nvram->data_len);
641 free(buf, M_QLNXBUF);
/*
 * qlnx_get_nvram_resp
 *	Fetch the management firmware's response to the most recent NVM
 *	command into a kernel buffer and copy it out to user space.
 *	Rejects NULL data or zero length up front.
 *	NOTE(review): the qlnx_zalloc() NULL check, error returns and the
 *	final return are elided in this view of the file.
 */
647 qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
652 if ((nvram->data == NULL) || (nvram->data_len == 0))
655 buf = qlnx_zalloc(nvram->data_len);
658 ret = ecore_mcp_nvm_resp(&ha->cdev, buf);
660 QL_DPRINT9(ha, "data = %p data_len = 0x%x \
661 resp = 0x%x ret = 0x%x exit\n",
662 nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
665 ret = copyout(buf, nvram->data, nvram->data_len);
668 free(buf, M_QLNXBUF);
/*
 * qlnx_nvram
 *	Dispatch an NVRAM sub-command from the QLNX_NVRAM ioctl: write /
 *	put-file-data go through qlnx_write_nvram() with the matching MCP
 *	command, reads through qlnx_read_nvram(), and the remaining
 *	commands (secure mode, delete file, put-file-begin, response
 *	fetch) call the ecore MCP layer directly.
 *	NOTE(review): the 'ret' declaration, break statements, default
 *	case and final return are elided in this view of the file.
 */
674 qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
678 switch (nvram->cmd) {
680 case QLNX_NVRAM_CMD_WRITE_NVRAM:
681 ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
684 case QLNX_NVRAM_CMD_PUT_FILE_DATA:
685 ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
688 case QLNX_NVRAM_CMD_READ_NVRAM:
689 ret = qlnx_read_nvram(ha, nvram);
692 case QLNX_NVRAM_CMD_SET_SECURE_MODE:
693 ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);
695 QL_DPRINT9(ha, "QLNX_NVRAM_CMD_SET_SECURE_MODE \
696 resp = 0x%x ret = 0x%x exit\n",
697 ha->cdev.mcp_nvm_resp, ret);
700 case QLNX_NVRAM_CMD_DEL_FILE:
701 ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);
703 QL_DPRINT9(ha, "QLNX_NVRAM_CMD_DEL_FILE \
704 resp = 0x%x ret = 0x%x exit\n",
705 ha->cdev.mcp_nvm_resp, ret);
708 case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
709 ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);
711 QL_DPRINT9(ha, "QLNX_NVRAM_CMD_PUT_FILE_BEGIN \
712 resp = 0x%x ret = 0x%x exit\n",
713 ha->cdev.mcp_nvm_resp, ret);
716 case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
717 ret = qlnx_get_nvram_resp(ha, nvram);
/*
 * qlnx_storm_stats
 *	Copy the accumulated per-hwfn storm firmware statistics samples
 *	out to the user buffers in s_stats->buffer[i], then reset the
 *	sample index so collection restarts.
 *	NOTE(review): the bzero() below clears ha->storm_stats[i] while
 *	the copyout above reads from ha->storm_stats[index] where
 *	index = QLNX_STORM_STATS_SAMPLES_PER_HWFN * i — the bzero index
 *	looks like it should also be 'index'; verify against the full
 *	source.
 *	NOTE(review): the 'ret'/'i'/'index' declarations and several
 *	control-flow lines are elided in this view of the file, and
 *	'stats_copied' is set but not visibly used here.
 */
729 qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
734 int stats_copied = 0;
736 s_stats->num_hwfns = ha->cdev.num_hwfns;
738 // if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
741 s_stats->num_samples = ha->storm_stats_index;
743 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* Each hwfn's samples start at a fixed stride into storm_stats[]. */
745 index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
747 if (s_stats->buffer[i]) {
749 ret = copyout(&ha->storm_stats[index],
751 QLNX_STORM_STATS_BYTES_PER_HWFN);
753 printf("%s [%d]: failed\n", __func__, i);
/* When the sample window is full, clear it for reuse. */
756 if (s_stats->num_samples ==
757 QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
759 bzero((void *)&ha->storm_stats[i],
760 QLNX_STORM_STATS_BYTES_PER_HWFN);
/* Restart sample collection from the beginning. */
768 ha->storm_stats_index = 0;
775 qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
784 if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
791 case QLNX_GRC_DUMP_SIZE:
792 qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
796 rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
799 case QLNX_IDLE_CHK_SIZE:
800 qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
804 rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
808 rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
811 case QLNX_DEV_SETTING:
812 rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
816 rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
820 rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
824 rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
827 case QLNX_RD_WR_PCICFG:
828 rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
832 qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
835 case QLNX_STORM_STATS:
836 qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
839 case QLNX_TRACE_SIZE:
840 qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
844 trace = (qlnx_trace_t *)data;
846 for (i = 0; i < ha->cdev.num_hwfns; i++) {
848 if (trace->size[i] && trace->cmd && trace->buffer[i])
849 rval = qlnx_get_trace(ha, i, trace);