2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
31 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
41 #include "ecore_gtt_reg_addr.h"
43 #include "ecore_chain.h"
44 #include "ecore_status.h"
46 #include "ecore_rt_defs.h"
47 #include "ecore_init_ops.h"
48 #include "ecore_int.h"
49 #include "ecore_cxt.h"
50 #include "ecore_spq.h"
51 #include "ecore_init_fw_funcs.h"
52 #include "ecore_sp_commands.h"
53 #include "ecore_dev_api.h"
54 #include "ecore_l2_api.h"
55 #include "ecore_mcp.h"
56 #include "ecore_hw_defs.h"
57 #include "mcp_public.h"
58 #include "ecore_iro.h"
60 #include "ecore_dev_api.h"
61 #include "ecore_dbg_fw_funcs.h"
62 #include "ecore_dcbx_api.h"
64 #include "qlnx_ioctl.h"
/* Forward declaration of the ioctl entry point (signature fragment visible). */
70 static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
/*
 * Character-device switch table: publishes the debug/management ioctl
 * interface of this driver under the "qlnxioctl" device name.
 */
73 static struct cdevsw qlnx_cdevsw = {
74 .d_version = D_VERSION,
75 .d_ioctl = qlnx_eioctl,
76 .d_name = "qlnxioctl",
/*
 * qlnx_make_cdev - create the per-adapter control device node.
 * On success, stores the adapter softc in si_drv1 so qlnx_eioctl()
 * can recover it from the cdev. Intervening lines (make_dev arguments,
 * error return) are elided in this view.
 */
80 qlnx_make_cdev(qlnx_host_t *ha)
82 ha->ioctl_dev = make_dev(&qlnx_cdevsw,
90 if (ha->ioctl_dev == NULL)
/* Back-pointer used by qlnx_eioctl() to find the adapter. */
93 ha->ioctl_dev->si_drv1 = ha;
/*
 * qlnx_del_cdev - tear down the control device created by qlnx_make_cdev().
 * Safe to call when the device was never created (NULL check).
 */
99 qlnx_del_cdev(qlnx_host_t *ha)
101 if (ha->ioctl_dev != NULL)
102 destroy_dev(ha->ioctl_dev);
/*
 * qlnx_grc_dump - capture a GRC register dump for one hw function.
 * Returns the cached dword count when a dump was already taken
 * (ha->grcdump_dwords[hwfn_index] non-zero); otherwise acquires a PTT
 * window and runs ecore_dbg_grc_dump() into the preallocated
 * ha->grcdump[hwfn_index] buffer. Several original lines (returns,
 * braces, rval declaration) are elided in this view.
 */
107 qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
110 struct ecore_hwfn *p_hwfn;
111 struct ecore_ptt *p_ptt;
113 if (ha->grcdump_dwords[hwfn_index]) {
114 /* the grcdump is already available */
115 *num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
/* Sync the debug app version with the firmware-function version. */
119 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
121 p_hwfn = &ha->cdev.hwfns[hwfn_index];
122 p_ptt = ecore_ptt_acquire(p_hwfn);
125 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
/* Buffer size is in bytes; ecore wants dwords, hence the >> 2. */
129 if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
130 ha->grcdump[hwfn_index],
131 (ha->grcdump_size[hwfn_index] >> 2),
132 num_dumped_dwords)) == DBG_STATUS_OK) {
134 ha->grcdump_taken = 1;
136 QL_DPRINT1(ha,"ecore_dbg_grc_dump failed [%d, 0x%x]\n",
139 ecore_ptt_release(p_hwfn, p_ptt);
/*
 * qlnx_get_grc_dump_size - report the per-hwfn GRC dump buffer sizes
 * (and the PCI function) back to the caller's qlnx_grcdump_t.
 */
145 qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
149 grcdump->pci_func = ha->pci_func;
151 for (i = 0; i < ha->cdev.num_hwfns; i++)
152 grcdump->grcdump_size[i] = ha->grcdump_size[i];
/*
 * qlnx_get_grc_dump - take (or reuse) a GRC dump for every hw function
 * and copy it out to the user-supplied buffers in the ioctl argument.
 * Skips any hwfn whose kernel or user buffer is missing or too small.
 * Clears the cached-dump state on completion so the next request
 * captures a fresh dump. Error-path lines are elided in this view.
 */
158 qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
164 grcdump->pci_func = ha->pci_func;
166 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* Require both kernel and user buffers, and a large-enough user size. */
168 if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
169 (grcdump->grcdump_size[i] < ha->grcdump_size[i]))
172 rval = qlnx_grc_dump(ha, &dwords, i);
177 grcdump->grcdump_dwords[i] = dwords;
179 QL_DPRINT1(ha,"grcdump_dwords[%d] = 0x%x\n", i, dwords);
/* copyout(9): kernel buffer -> user buffer; returns errno on fault. */
181 rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
182 ha->grcdump_size[i]);
187 ha->grcdump_dwords[i] = 0;
/* Invalidate the cache so a later ioctl re-captures the dump. */
190 ha->grcdump_taken = 0;
/*
 * qlnx_idle_chk - run a firmware idle-check dump for one hw function.
 * Mirrors qlnx_grc_dump(): returns the cached dword count if an idle
 * check was already taken, otherwise acquires a PTT and runs
 * ecore_dbg_idle_chk_dump() into ha->idle_chk[hwfn_index]. Several
 * original lines (returns, braces, rval declaration) are elided here.
 */
196 qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
199 struct ecore_hwfn *p_hwfn;
200 struct ecore_ptt *p_ptt;
202 if (ha->idle_chk_dwords[hwfn_index]) {
203 /* the idle check is already available */
204 *num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
/* Sync the debug app version with the firmware-function version. */
208 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
210 p_hwfn = &ha->cdev.hwfns[hwfn_index];
211 p_ptt = ecore_ptt_acquire(p_hwfn);
214 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
/* Size is in bytes; the ecore API takes dwords (>> 2). */
218 if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
219 ha->idle_chk[hwfn_index],
220 (ha->idle_chk_size[hwfn_index] >> 2),
221 num_dumped_dwords)) == DBG_STATUS_OK) {
223 ha->idle_chk_taken = 1;
225 QL_DPRINT1(ha,"ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
228 ecore_ptt_release(p_hwfn, p_ptt);
/*
 * qlnx_get_idle_chk_size - report the per-hwfn idle-check buffer sizes
 * (and the PCI function) back to the caller's qlnx_idle_chk_t.
 */
234 qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
238 idle_chk->pci_func = ha->pci_func;
240 for (i = 0; i < ha->cdev.num_hwfns; i++)
241 idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];
/*
 * qlnx_get_idle_chk - take (or reuse) an idle-check dump for every hw
 * function and copy it out to the user buffers in the ioctl argument.
 * Parallel in structure to qlnx_get_grc_dump(); clears the cached state
 * when done. Error-path lines are elided in this view.
 */
247 qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
253 idle_chk->pci_func = ha->pci_func;
255 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* Require both kernel and user buffers, and a large-enough user size. */
257 if ((ha->idle_chk[i] == NULL) ||
258 (idle_chk->idle_chk[i] == NULL) ||
259 (idle_chk->idle_chk_size[i] <
260 ha->idle_chk_size[i]))
263 rval = qlnx_idle_chk(ha, &dwords, i);
268 idle_chk->idle_chk_dwords[i] = dwords;
270 QL_DPRINT1(ha,"idle_chk_dwords[%d] = 0x%x\n", i, dwords);
272 rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
273 ha->idle_chk_size[i]);
278 ha->idle_chk_dwords[i] = 0;
/* Invalidate the cache so a later ioctl re-runs the idle check. */
280 ha->idle_chk_taken = 0;
/*
 * qlnx_get_trace_cmd_size - query the dump-buffer size (in bytes) needed
 * for one debug-trace command (MCP trace, reg/IGU FIFO, protection
 * override, or FW asserts) on the given hw function. The switch header
 * and several case labels are elided in this view; num_dwords is filled
 * by the selected ecore_dbg_*_get_dump_buf_size() call.
 */
286 qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
289 struct ecore_hwfn *p_hwfn;
290 struct ecore_ptt *p_ptt;
291 uint32_t num_dwords = 0;
293 p_hwfn = &ha->cdev.hwfns[hwfn_index];
294 p_ptt = ecore_ptt_acquire(p_hwfn);
297 QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
305 rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
310 rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
315 rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
319 case QLNX_PROTECTION_OVERRIDE:
320 rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
324 case QLNX_FW_ASSERTS:
325 rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
330 if (rval != DBG_STATUS_OK) {
331 QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", cmd, rval);
335 ecore_ptt_release(p_hwfn, p_ptt);
/* Convert the dword count reported by ecore into a byte count. */
337 return ((num_dwords * sizeof (uint32_t)));
/*
 * qlnx_get_trace_size - fill trace->size[] with the per-hwfn buffer size
 * required for the requested trace command (trace->cmd), and report the
 * PCI function.
 */
341 qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
345 trace->pci_func = ha->pci_func;
347 for (i = 0; i < ha->cdev.num_hwfns; i++) {
348 trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
/*
 * qlnx_get_trace - run the requested debug-trace dump (trace->cmd) for
 * one hw function into a temporary kernel buffer, then copy it out to
 * the user buffer. The buffer is sized from trace->size[hwfn_index]
 * (previously obtained via qlnx_get_trace_size()). Switch header, case
 * labels for the first three commands, error paths, and the buffer free
 * are elided in this view.
 */
355 qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
358 struct ecore_hwfn *p_hwfn;
359 struct ecore_ptt *p_ptt;
360 uint32_t num_dwords = 0;
/* Temporary zeroed kernel buffer for the dump output. */
363 buffer = qlnx_zalloc(trace->size[hwfn_index]);
364 if (buffer == NULL) {
365 QL_DPRINT1(ha,"qlnx_zalloc [%d, 0x%x]failed\n",
366 hwfn_index, trace->cmd);
/* Sync the debug app version with the firmware-function version. */
369 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
371 p_hwfn = &ha->cdev.hwfns[hwfn_index];
372 p_ptt = ecore_ptt_acquire(p_hwfn);
375 QL_DPRINT1(ha, "ecore_ptt_acquire [%d, 0x%x]failed\n",
376 hwfn_index, trace->cmd);
/* Dispatch to the ecore dump routine matching the requested command. */
380 switch (trace->cmd) {
383 rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
384 buffer, (trace->size[hwfn_index] >> 2),
389 rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
390 buffer, (trace->size[hwfn_index] >> 2),
395 rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
396 buffer, (trace->size[hwfn_index] >> 2),
400 case QLNX_PROTECTION_OVERRIDE:
401 rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
402 buffer, (trace->size[hwfn_index] >> 2),
406 case QLNX_FW_ASSERTS:
407 rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
408 buffer, (trace->size[hwfn_index] >> 2),
413 if (rval != DBG_STATUS_OK) {
414 QL_DPRINT1(ha,"cmd = 0x%x failed [0x%x]\n", trace->cmd, rval);
418 ecore_ptt_release(p_hwfn, p_ptt);
420 trace->dwords[hwfn_index] = num_dwords;
/* Copy the captured trace out to the caller's user-space buffer. */
423 rval = copyout(buffer, trace->buffer[hwfn_index],
/*
 * qlnx_reg_rd_wr - perform a direct register read or write on behalf of
 * the ioctl caller, after bounds-checking the hw-function index.
 * Only the QLNX_REG_ACCESS_DIRECT paths are visible here; other access
 * types and the default/error handling are elided in this view.
 */
431 qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
434 struct ecore_hwfn *p_hwfn;
/* Reject out-of-range hwfn indices from user space. */
436 if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
440 p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
442 switch (reg_rd_wr->cmd) {
444 case QLNX_REG_READ_CMD:
445 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
446 reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
451 case QLNX_REG_WRITE_CMD:
452 if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
453 qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
/*
 * qlnx_rd_wr_pci_config - read or write a PCI config-space register on
 * behalf of the ioctl caller, using the width supplied in the request.
 * Default case and return are elided in this view.
 */
467 qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
471 switch (pci_cfg_rd_wr->cmd) {
473 case QLNX_PCICFG_READ:
474 pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
476 pci_cfg_rd_wr->width);
479 case QLNX_PCICFG_WRITE:
480 pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
481 pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
/*
 * qlnx_mac_addr - format the adapter's permanent MAC address as a
 * "xx:xx:xx:xx:xx:xx" string into the ioctl reply buffer.
 */
493 qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
495 bzero(mac_addr->addr, sizeof(mac_addr->addr));
496 snprintf(mac_addr->addr, sizeof(mac_addr->addr),
497 "%02x:%02x:%02x:%02x:%02x:%02x",
498 ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
499 ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);
/*
 * qlnx_get_regs - concatenate the GRC dumps of all hw functions into the
 * caller's single user buffer (ethtool-style register dump), tracking
 * the total length in regs->reg_buf_len. Error handling and returns are
 * elided in this view.
 */
505 qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
512 regs->reg_buf_len = 0;
513 outb = regs->reg_buf;
515 for (i = 0; i < ha->cdev.num_hwfns; i++) {
517 rval = qlnx_grc_dump(ha, &dwords, i);
522 regs->reg_buf_len += (dwords << 2);
524 rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);
529 ha->grcdump_dwords[i] = 0;
/*
 * NOTE(review): outb is advanced by the *accumulated* total length
 * rather than this hwfn's dump size; looks off for num_hwfns > 1,
 * but the elided lines prevent confirming — verify against the
 * full source before changing.
 */
530 outb += regs->reg_buf_len;
/* Invalidate the cached dumps so the next request re-captures them. */
533 ha->grcdump_taken = 0;
538 extern char qlnx_name_str[];
539 extern char qlnx_ver_str[];
/*
 * qlnx_drv_info - fill the ethtool-style driver-info reply: driver
 * name/version, MFW and storm firmware versions, flash (eeprom) size,
 * total register-dump length across all hw functions, and the PCI
 * bus/slot/function location string.
 */
542 qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
546 bzero(drv_info, sizeof(qlnx_drvinfo_t));
548 snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
550 snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
552 snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
554 snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
555 "%s", ha->stormfw_ver);
557 drv_info->eeprom_dump_len = ha->flash_size;
/* Register dump length is the sum of every hwfn's GRC buffer size. */
559 for (i = 0; i < ha->cdev.num_hwfns; i++) {
560 drv_info->reg_dump_len += ha->grcdump_size[i];
563 snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
564 "%d:%d:%d", pci_get_bus(ha->pci_dev),
565 pci_get_slot(ha->pci_dev), ha->pci_func);
/*
 * qlnx_dev_settings - report link settings (supported/advertised caps,
 * speed, duplex, port, autoneg) taken from hw function 0's link state
 * via qlnx_fill_link().
 */
571 qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
573 struct ecore_hwfn *p_hwfn;
574 struct qlnx_link_output if_link;
576 p_hwfn = &ha->cdev.hwfns[0];
578 qlnx_fill_link(ha, p_hwfn, &if_link);
580 dev_info->supported = if_link.supported_caps;
581 dev_info->advertising = if_link.advertised_caps;
582 dev_info->speed = if_link.speed;
583 dev_info->duplex = if_link.duplex;
/* Port number derived from the low bit of the PCI function. */
584 dev_info->port = ha->pci_func & 0x1;
585 dev_info->autoneg = if_link.autoneg;
/*
 * qlnx_write_nvram - copy a user buffer into the kernel and issue an
 * NVRAM write command (cmd selects write-nvram vs. put-file-data) via
 * the MCP. Validates that a non-empty user buffer was supplied.
 * Allocation-failure and copyin-error paths are elided in this view.
 */
591 qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
596 if ((nvram->data == NULL) || (nvram->data_len == 0))
599 buf = qlnx_zalloc(nvram->data_len);
/* copyin(9): user buffer -> kernel buffer; returns errno on fault. */
601 ret = copyin(nvram->data, buf, nvram->data_len);
603 QL_DPRINT9(ha, "issue cmd = 0x%x data = %p \
604 data_len = 0x%x ret = 0x%x exit\n",
605 cmd, nvram->data, nvram->data_len, ret);
608 ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
609 nvram->offset, buf, nvram->data_len);
612 QL_DPRINT9(ha, "cmd = 0x%x data = %p \
613 data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
614 cmd, nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
616 free(buf, M_QLNXBUF);
/*
 * qlnx_read_nvram - read nvram->data_len bytes from NVRAM at
 * nvram->offset into a temporary kernel buffer via the MCP, then copy
 * the result out to the caller's user buffer. Validates that a
 * non-empty user buffer was supplied; error paths are elided here.
 */
622 qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
627 if ((nvram->data == NULL) || (nvram->data_len == 0))
630 buf = qlnx_zalloc(nvram->data_len);
632 ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
635 QL_DPRINT9(ha, " data = %p data_len = 0x%x \
636 resp = 0x%x ret = 0x%x exit\n",
637 nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
640 ret = copyout(buf, nvram->data, nvram->data_len);
643 free(buf, M_QLNXBUF);
/*
 * qlnx_get_nvram_resp - fetch the MCP's last NVRAM command response into
 * a temporary kernel buffer and copy it out to the caller's user buffer.
 * Validates that a non-empty user buffer was supplied; error paths are
 * elided in this view.
 */
649 qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
654 if ((nvram->data == NULL) || (nvram->data_len == 0))
657 buf = qlnx_zalloc(nvram->data_len);
660 ret = ecore_mcp_nvm_resp(&ha->cdev, buf);
662 QL_DPRINT9(ha, "data = %p data_len = 0x%x \
663 resp = 0x%x ret = 0x%x exit\n",
664 nvram->data, nvram->data_len, ha->cdev.mcp_nvm_resp, ret);
667 ret = copyout(buf, nvram->data, nvram->data_len);
670 free(buf, M_QLNXBUF);
/*
 * qlnx_nvram - dispatch an NVRAM ioctl sub-command: write / put-file /
 * read / set-secure-mode / delete-file / put-file-begin / get-response.
 * Each branch delegates to the matching helper or ecore MCP call.
 * Default case and return are elided in this view.
 */
676 qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
680 switch (nvram->cmd) {
682 case QLNX_NVRAM_CMD_WRITE_NVRAM:
683 ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
686 case QLNX_NVRAM_CMD_PUT_FILE_DATA:
687 ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
690 case QLNX_NVRAM_CMD_READ_NVRAM:
691 ret = qlnx_read_nvram(ha, nvram);
694 case QLNX_NVRAM_CMD_SET_SECURE_MODE:
695 ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);
697 QL_DPRINT9(ha, "QLNX_NVRAM_CMD_SET_SECURE_MODE \
698 resp = 0x%x ret = 0x%x exit\n",
699 ha->cdev.mcp_nvm_resp, ret);
702 case QLNX_NVRAM_CMD_DEL_FILE:
703 ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);
705 QL_DPRINT9(ha, "QLNX_NVRAM_CMD_DEL_FILE \
706 resp = 0x%x ret = 0x%x exit\n",
707 ha->cdev.mcp_nvm_resp, ret);
710 case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
711 ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);
713 QL_DPRINT9(ha, "QLNX_NVRAM_CMD_PUT_FILE_BEGIN \
714 resp = 0x%x ret = 0x%x exit\n",
715 ha->cdev.mcp_nvm_resp, ret);
718 case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
719 ret = qlnx_get_nvram_resp(ha, nvram);
/*
 * qlnx_storm_stats - copy the accumulated per-hwfn storm firmware
 * statistics samples out to the caller's buffers, then reset the sample
 * index so collection restarts. Error paths are elided in this view.
 */
731 qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
736 int stats_copied = 0;
738 s_stats->num_hwfns = ha->cdev.num_hwfns;
740 // if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
743 s_stats->num_samples = ha->storm_stats_index;
745 for (i = 0; i < ha->cdev.num_hwfns; i++) {
/* Each hwfn's samples start at a fixed stride within storm_stats[]. */
747 index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
749 if (s_stats->buffer[i]) {
751 ret = copyout(&ha->storm_stats[index],
753 QLNX_STORM_STATS_BYTES_PER_HWFN);
755 printf("%s [%d]: failed\n", __func__, i);
758 if (s_stats->num_samples ==
759 QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
/*
 * NOTE(review): bzero uses index "i" while the copyout above
 * used the strided "index" — verify against the full source;
 * looks like it may clear the wrong slot for i > 0.
 */
761 bzero((void *)&ha->storm_stats[i],
762 QLNX_STORM_STATS_BYTES_PER_HWFN);
/* Restart sample collection from the beginning. */
770 ha->storm_stats_index = 0;
775 #ifdef QLNX_USER_LLDP
/*
 * qlnx_lldp_configure - enable or disable the firmware LLDP agent on
 * the nearest-bridge scope. Fetches the LLDP MAC from the MFW, builds
 * chassis-id and port-id TLVs (subtype MAC address) from it, pushes the
 * agent parameters, and on disable clears the discard_mandatory_tlv
 * system-TLV flag. Return statements and some braces are elided in
 * this view.
 */
778 qlnx_lldp_configure(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
779 struct ecore_ptt *p_ptt, uint32_t enable)
782 uint8_t lldp_mac[6] = {0};
783 struct ecore_lldp_config_params lldp_params;
784 struct ecore_lldp_sys_tlvs tlv_params;
786 ret = ecore_mcp_get_lldp_mac(p_hwfn, p_ptt, lldp_mac);
788 if (ret != ECORE_SUCCESS) {
789 device_printf(ha->pci_dev,
790 "%s: ecore_mcp_get_lldp_mac failed\n", __func__);
794 bzero(&lldp_params, sizeof(struct ecore_lldp_config_params));
795 bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));
797 lldp_params.agent = ECORE_LLDP_NEAREST_BRIDGE;
798 lldp_params.tx_interval = 30; //Default value used as suggested by MFW
799 lldp_params.tx_hold = 4; //Default value used as suggested by MFW
800 lldp_params.tx_credit = 5; //Default value used as suggested by MFW
801 lldp_params.rx_enable = enable ? 1 : 0;
802 lldp_params.tx_enable = enable ? 1 : 0;
/*
 * Chassis-ID TLV packed into u32 words: byte0 reserved, byte1 = TLV
 * type (shifted left 1), byte2 = length, byte3 = subtype (MAC), then
 * the six MAC bytes spread across the remaining words.
 */
804 lldp_params.chassis_id_tlv[0] = 0;
805 lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_TYPE_CHASSIS_ID << 1);
806 lldp_params.chassis_id_tlv[0] |=
807 ((QLNX_LLDP_CHASSIS_ID_SUBTYPE_OCTETS +
808 QLNX_LLDP_CHASSIS_ID_MAC_ADDR_LEN) << 8);
809 lldp_params.chassis_id_tlv[0] |= (QLNX_LLDP_CHASSIS_ID_SUBTYPE_MAC << 16);
810 lldp_params.chassis_id_tlv[0] |= lldp_mac[0] << 24;
811 lldp_params.chassis_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
812 (lldp_mac[3] << 16) | (lldp_mac[4] << 24);
813 lldp_params.chassis_id_tlv[2] = lldp_mac[5];
/* Port-ID TLV packed with the same layout as the chassis-ID TLV. */
816 lldp_params.port_id_tlv[0] = 0;
817 lldp_params.port_id_tlv[0] |= (QLNX_LLDP_TYPE_PORT_ID << 1);
818 lldp_params.port_id_tlv[0] |=
819 ((QLNX_LLDP_PORT_ID_SUBTYPE_OCTETS +
820 QLNX_LLDP_PORT_ID_MAC_ADDR_LEN) << 8);
821 lldp_params.port_id_tlv[0] |= (QLNX_LLDP_PORT_ID_SUBTYPE_MAC << 16);
822 lldp_params.port_id_tlv[0] |= lldp_mac[0] << 24;
823 lldp_params.port_id_tlv[1] = lldp_mac[1] | (lldp_mac[2] << 8) |
824 (lldp_mac[3] << 16) | (lldp_mac[4] << 24);
825 lldp_params.port_id_tlv[2] = lldp_mac[5];
827 ret = ecore_lldp_set_params(p_hwfn, p_ptt, &lldp_params);
829 if (ret != ECORE_SUCCESS) {
830 device_printf(ha->pci_dev,
831 "%s: ecore_lldp_set_params failed\n", __func__);
835 //If LLDP is disable then disable discard_mandatory_tlv flag
837 tlv_params.discard_mandatory_tlv = false;
838 tlv_params.buf_size = 0;
839 ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);
842 if (ret != ECORE_SUCCESS) {
843 device_printf(ha->pci_dev,
844 "%s: ecore_lldp_set_system_tlvs failed\n", __func__);
/*
 * qlnx_register_default_lldp_tlvs - register the standard set of LLDP
 * TLV types (chassis-id, port-id, TTL, port/system descriptions, system
 * name/capabilities, management address, org-specific) with the
 * firmware LLDP agent on the nearest-bridge scope. Bails out to the
 * common exit label on the first failure; the final return is elided
 * in this view.
 */
851 qlnx_register_default_lldp_tlvs(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
852 struct ecore_ptt *p_ptt)
/* Register Chassis ID TLV. */
856 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
857 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_CHASSIS_ID);
858 if (ret != ECORE_SUCCESS) {
859 device_printf(ha->pci_dev,
860 "%s: QLNX_LLDP_TYPE_CHASSIS_ID failed\n", __func__);
861 goto qlnx_register_default_lldp_tlvs_exit;
864 //register Port ID TLV
865 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
866 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_ID);
867 if (ret != ECORE_SUCCESS) {
868 device_printf(ha->pci_dev,
869 "%s: QLNX_LLDP_TYPE_PORT_ID failed\n", __func__);
870 goto qlnx_register_default_lldp_tlvs_exit;
/* Register Time-To-Live TLV. */
874 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
875 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_TTL);
876 if (ret != ECORE_SUCCESS) {
877 device_printf(ha->pci_dev,
878 "%s: QLNX_LLDP_TYPE_TTL failed\n", __func__);
879 goto qlnx_register_default_lldp_tlvs_exit;
882 //register Port Description TLV
883 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
884 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_PORT_DESC);
885 if (ret != ECORE_SUCCESS) {
886 device_printf(ha->pci_dev,
887 "%s: QLNX_LLDP_TYPE_PORT_DESC failed\n", __func__);
888 goto qlnx_register_default_lldp_tlvs_exit;
891 //register System Name TLV
892 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
893 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_NAME);
894 if (ret != ECORE_SUCCESS) {
895 device_printf(ha->pci_dev,
896 "%s: QLNX_LLDP_TYPE_SYS_NAME failed\n", __func__);
897 goto qlnx_register_default_lldp_tlvs_exit;
900 //register System Description TLV
901 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
902 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_DESC);
903 if (ret != ECORE_SUCCESS) {
904 device_printf(ha->pci_dev,
905 "%s: QLNX_LLDP_TYPE_SYS_DESC failed\n", __func__);
906 goto qlnx_register_default_lldp_tlvs_exit;
909 //register System Capabilities TLV
910 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
911 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_SYS_CAPS);
912 if (ret != ECORE_SUCCESS) {
913 device_printf(ha->pci_dev,
914 "%s: QLNX_LLDP_TYPE_SYS_CAPS failed\n", __func__);
915 goto qlnx_register_default_lldp_tlvs_exit;
918 //register Management Address TLV
919 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
920 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_MGMT_ADDR);
921 if (ret != ECORE_SUCCESS) {
922 device_printf(ha->pci_dev,
923 "%s: QLNX_LLDP_TYPE_MGMT_ADDR failed\n", __func__);
924 goto qlnx_register_default_lldp_tlvs_exit;
927 //register Organizationally Specific TLVs
928 ret = ecore_lldp_register_tlv(p_hwfn, p_ptt,
929 ECORE_LLDP_NEAREST_BRIDGE, QLNX_LLDP_TYPE_ORG_SPECIFIC);
930 if (ret != ECORE_SUCCESS) {
931 device_printf(ha->pci_dev,
932 "%s: QLNX_LLDP_TYPE_ORG_SPECIFIC failed\n", __func__);
935 qlnx_register_default_lldp_tlvs_exit:
/*
 * qlnx_set_lldp_tlvx - ioctl backend for QLNX_SET_LLDP_TLVS: cycles the
 * firmware LLDP agent (disable, re-register default TLVs, enable) on hw
 * function 0 and, if the caller supplied TLV data, pushes it as system
 * TLVs. All paths release the PTT at the common exit label. Several
 * error-check lines and the return are elided in this view.
 */
940 qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs)
943 struct ecore_hwfn *p_hwfn;
944 struct ecore_ptt *p_ptt;
945 struct ecore_lldp_sys_tlvs tlv_params;
947 p_hwfn = &ha->cdev.hwfns[0];
948 p_ptt = ecore_ptt_acquire(p_hwfn);
951 device_printf(ha->pci_dev,
952 "%s: ecore_ptt_acquire failed\n", __func__);
/* Step 1: disable the LLDP agent before reconfiguring. */
956 ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 0);
959 device_printf(ha->pci_dev,
960 "%s: qlnx_lldp_configure disable failed\n", __func__);
961 goto qlnx_set_lldp_tlvx_exit;
/* Step 2: re-register the default TLV set. */
964 ret = qlnx_register_default_lldp_tlvs(ha, p_hwfn, p_ptt);
967 device_printf(ha->pci_dev,
968 "%s: qlnx_register_default_lldp_tlvs failed\n",
970 goto qlnx_set_lldp_tlvx_exit;
/* Step 3: re-enable the LLDP agent. */
973 ret = qlnx_lldp_configure(ha, p_hwfn, p_ptt, 1);
976 device_printf(ha->pci_dev,
977 "%s: qlnx_lldp_configure enable failed\n", __func__);
978 goto qlnx_set_lldp_tlvx_exit;
/* Step 4: push caller-supplied system TLVs, if any. */
981 if (lldp_tlvs != NULL) {
982 bzero(&tlv_params, sizeof(struct ecore_lldp_sys_tlvs));
984 tlv_params.discard_mandatory_tlv =
985 (lldp_tlvs->discard_mandatory_tlv ? true: false);
986 tlv_params.buf_size = lldp_tlvs->buf_size;
987 memcpy(tlv_params.buf, lldp_tlvs->buf, lldp_tlvs->buf_size);
989 ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);
992 device_printf(ha->pci_dev,
993 "%s: ecore_lldp_set_system_tlvs failed\n",
997 qlnx_set_lldp_tlvx_exit:
999 ecore_ptt_release(p_hwfn, p_ptt);
1003 #endif /* #ifdef QLNX_USER_LLDP */
/*
 * qlnx_eioctl - main ioctl dispatcher for the qlnxioctl control device.
 * Recovers the adapter softc from si_drv1 and routes each command to
 * its handler (GRC dump, idle check, driver info, device settings,
 * register dump, NVRAM, register/PCI-config access, MAC address, storm
 * stats, debug traces, and optionally LLDP TLVs). The switch header,
 * some case labels, the default case, and the final return are elided
 * in this view.
 */
1006 qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
1012 qlnx_trace_t *trace;
/* Recover the adapter softc stashed by qlnx_make_cdev(). */
1015 if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
1022 case QLNX_GRC_DUMP_SIZE:
1023 qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
1027 rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
1030 case QLNX_IDLE_CHK_SIZE:
1031 qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
1035 rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
1039 rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
1042 case QLNX_DEV_SETTING:
1043 rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
1047 rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
1051 rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
1054 case QLNX_RD_WR_REG:
1055 rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
1058 case QLNX_RD_WR_PCICFG:
1059 rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
1063 qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
1066 case QLNX_STORM_STATS:
1067 qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
1070 case QLNX_TRACE_SIZE:
1071 qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
1075 trace = (qlnx_trace_t *)data;
/* Only dump hwfns whose size, command, and user buffer are all set. */
1077 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1079 if (trace->size[i] && trace->cmd && trace->buffer[i])
1080 rval = qlnx_get_trace(ha, i, trace);
1087 #ifdef QLNX_USER_LLDP
1088 case QLNX_SET_LLDP_TLVS:
1089 rval = qlnx_set_lldp_tlvx(ha, (qlnx_lldp_sys_tlvs_t *)data);
1091 #endif /* #ifdef QLNX_USER_LLDP */