/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
41 #include "ql_inline.h"
47 static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
48 static uint32_t ql_drvr_state_size(qla_host_t *ha);
49 static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
/*
 * Character-device switch for the per-adapter control node created by
 * ql_make_cdev().  NOTE(review): extract is incomplete -- the .d_ioctl
 * entry (presumably ql_eioctl), .d_name and the closing brace are not
 * visible here; confirm against the full file.
 */
52 static struct cdevsw qla_cdevsw = {
53 .d_version = D_VERSION,
/*
 * ql_make_cdev
 *	Create the /dev control node for this adapter and stash the
 *	qla_host_t in si_drv1 so ql_eioctl() can recover the softc.
 *	NOTE(review): extract is incomplete -- make_dev() arguments, the
 *	error-return path and the function's return value are not visible.
 */
59 ql_make_cdev(qla_host_t *ha)
61 ha->ioctl_dev = make_dev(&qla_cdevsw,
/* make_dev() can fail; error handling presumably follows -- TODO confirm */
69 if (ha->ioctl_dev == NULL)
/* Back-pointer consulted by ql_eioctl() on every ioctl. */
72 ha->ioctl_dev->si_drv1 = ha;
/*
 * ql_del_cdev
 *	Tear down the control node created by ql_make_cdev(), if any.
 */
78 ql_del_cdev(qla_host_t *ha)
80 if (ha->ioctl_dev != NULL)
81 destroy_dev(ha->ioctl_dev);
/*
 * ql_eioctl
 *	ioctl handler for the adapter control device; 'ha' is recovered
 *	from dev->si_drv1 (set by ql_make_cdev()).
 *
 *	NOTE(review): this extract is incomplete -- the switch statement,
 *	several case labels, local declarations, returns and break
 *	statements are missing.  Comments below describe only what the
 *	visible lines demonstrate; verify against the full file.
 */
86 ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
95 q80_offchip_mem_val_t val;
96 qla_rd_pci_ids_t *pci_ids;
97 qla_rd_fw_dump_t *fw_dump;
102 qla_erase_flash_t *erf;
103 qla_offchip_mem_val_t *mem;
/* Softc back-pointer stashed by ql_make_cdev(); bail out if absent. */
107 if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
110 pci_dev= ha->pci_dev;
/* Direct register read/write: 'data' carries a qla_reg_val_t in/out. */
116 u.rv = (qla_reg_val_t *)data;
120 u.rv->val = READ_REG32(ha, u.rv->reg);
122 WRITE_REG32(ha, u.rv->reg, u.rv->val);
/* Indirect register access via helper; rval propagated on failure. */
125 if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
/* Flash read path: requires a valid flash descriptor table (fdt). */
133 if (!ha->hw.flags.fdt_valid) {
138 u.rdf = (qla_rd_flash_t *)data;
139 if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
/* Flash write: apparently refused while the interface is up -- TODO confirm */
152 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
157 if (!ha->hw.flags.fdt_valid) {
162 u.wrf = (qla_wr_flash_t *)data;
163 if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
165 printf("flash write failed[%d]\n", rval);
/* Flash erase: same interface-idle and fdt_valid preconditions as write. */
170 case QLA_ERASE_FLASH:
179 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
184 if (!ha->hw.flags.fdt_valid) {
189 u.erf = (qla_erase_flash_t *)data;
190 if ((rval = ql_erase_flash(ha, u.erf->off,
192 printf("flash erase failed[%d]\n", rval);
/* Off-chip (MS) memory access; result copied back into the user arg. */
197 case QLA_RDWR_MS_MEM:
198 u.mem = (qla_offchip_mem_val_t *)data;
200 if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
204 u.mem->data_lo = val.data_lo;
205 u.mem->data_hi = val.data_hi;
206 u.mem->data_ulo = val.data_ulo;
207 u.mem->data_uhi = val.data_uhi;
/* Report minidump size (dump buffer + template); needs mdump_init. */
212 case QLA_RD_FW_DUMP_SIZE:
214 if (ha->hw.mdump_init == 0) {
219 fw_dump = (qla_rd_fw_dump_t *)data;
220 fw_dump->minidump_size = ha->hw.mdump_buffer_size +
221 ha->hw.mdump_template_size;
222 fw_dump->pci_func = ha->pci_func;
/* Fetch the firmware minidump into the user-supplied buffer. */
228 if (ha->hw.mdump_init == 0) {
233 fw_dump = (qla_rd_fw_dump_t *)data;
/* User buffer must exist and match template+buffer size exactly. */
235 if ((fw_dump->minidump == NULL) ||
236 (fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
237 ha->hw.mdump_template_size))) {
/* No completed dump yet: request recovery (which produces one). */
242 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
243 if (!ha->hw.mdump_done)
244 ha->qla_initiate_recovery = 1;
245 QLA_UNLOCK(ha, __func__);
/* Poll up to 30s in 100ms steps for the minidump to complete. */
251 #define QLNX_DUMP_WAIT_SECS 30
253 count = QLNX_DUMP_WAIT_SECS * 1000;
256 if (ha->hw.mdump_done)
258 qla_mdelay(__func__, 100);
262 if (!ha->hw.mdump_done) {
/* Consume the done flag under the lock before copying out. */
267 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
268 ha->hw.mdump_done = 0;
269 QLA_UNLOCK(ha, __func__);
/* User-buffer layout: template first, then the dump data. */
275 if ((rval = copyout(ha->hw.mdump_template,
276 fw_dump->minidump, ha->hw.mdump_template_size))) {
281 if ((rval = copyout(ha->hw.mdump_buffer,
282 ((uint8_t *)fw_dump->minidump +
283 ha->hw.mdump_template_size),
284 ha->hw.mdump_buffer_size)))
/* Driver-state snapshot: delegated to ql_drvr_state(). */
288 case QLA_RD_DRVR_STATE:
289 rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
/* PCI identity of the adapter, read live from the pci device. */
293 pci_ids = (qla_rd_pci_ids_t *)data;
294 pci_ids->ven_id = pci_get_vendor(pci_dev);
295 pci_ids->dev_id = pci_get_device(pci_dev);
296 pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
297 pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
298 pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
/*
 * ql_drvr_state
 *	Service QLA_RD_DRVR_STATE: when state->buffer is NULL, report the
 *	required snapshot size; otherwise capture the driver state lazily
 *	(if not already captured) and copy it out to userland.
 *	NOTE(review): extract is incomplete -- the error-return paths after
 *	the size/NULL checks are not visible.
 */
310 ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
313 uint32_t drvr_state_size;
314 qla_drvr_state_hdr_t *hdr;
316 drvr_state_size = ql_drvr_state_size(ha);
/* Size-probe mode: NULL buffer means "tell me how big". */
318 if (state->buffer == NULL) {
319 state->size = drvr_state_size;
/* Caller's buffer must be large enough for the whole snapshot. */
323 if (state->size < drvr_state_size)
326 if (ha->hw.drvr_state == NULL)
329 hdr = ha->hw.drvr_state;
/* Zero major version marks an un-captured buffer; capture on demand. */
331 if (!hdr->drvr_version_major)
332 ql_capture_drvr_state(ha);
334 rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);
/* Clear after copyout so the next read forces a fresh capture. */
336 bzero(ha->hw.drvr_state, drvr_state_size);
/*
 * ql_drvr_state_size
 *	Total bytes needed for a driver-state snapshot: header, per-ring
 *	tx/rx/sds software state, then raw images of every tx, rx and
 *	status descriptor ring -- each section rounded up to a 64-byte
 *	boundary with QL_ALIGN.  Must stay in lock-step with the layout
 *	built by ql_capture_drvr_state().
 */
342 ql_drvr_state_size(qla_host_t *ha)
344 uint32_t drvr_state_size;
347 size = sizeof (qla_drvr_state_hdr_t);
348 drvr_state_size = QL_ALIGN(size, 64);
/* Per-ring software state sections. */
350 size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
351 drvr_state_size += QL_ALIGN(size, 64);
353 size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
354 drvr_state_size += QL_ALIGN(size, 64);
356 size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
357 drvr_state_size += QL_ALIGN(size, 64);
/* Raw descriptor-ring images. */
359 size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
360 drvr_state_size += QL_ALIGN(size, 64);
362 size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS * ha->hw.num_rds_rings;
363 drvr_state_size += QL_ALIGN(size, 64);
365 size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
366 ha->hw.num_sds_rings;
367 drvr_state_size += QL_ALIGN(size, 64);
369 return (drvr_state_size);
/*
 * ql_get_tx_state
 *	Copy per-ring transmit software state into the snapshot array.
 *	NOTE(review): the advance of tx_state to the next array slot is not
 *	visible in this extract -- presumably tx_state++ per iteration;
 *	confirm before relying on per-ring entries being distinct.
 */
373 ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
377 for (i = 0; i < ha->hw.num_tx_rings; i++) {
378 tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
379 tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
380 tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
381 tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
382 tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
383 tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
384 tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
/*
 * ql_get_rx_state
 *	Copy per-ring receive (RDS) software state into the snapshot array.
 *	NOTE(review): rx_state advance not visible in this extract --
 *	presumably rx_state++ per iteration; confirm in full file.
 */
391 ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
395 for (i = 0; i < ha->hw.num_rds_rings; i++) {
396 rx_state->prod_std = ha->hw.rds[i].prod_std;
397 rx_state->rx_next = ha->hw.rds[i].rx_next;
/*
 * ql_get_sds_state
 *	Copy per-ring status (SDS) software state into the snapshot array.
 *	NOTE(review): sds_state advance not visible in this extract --
 *	presumably sds_state++ per iteration; confirm in full file.
 */
404 ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
408 for (i = 0; i < ha->hw.num_sds_rings; i++) {
409 sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
410 sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
/*
 * ql_capture_drvr_state
 *	Fill ha->hw.drvr_state with a self-describing snapshot of the
 *	driver state.  Layout (each section 64-byte aligned, offsets
 *	recorded in the header): header, tx/rx/sds software ring state,
 *	then raw images of the tx, rx and status descriptor rings.  The
 *	size computed by ql_drvr_state_size() must match this layout.
 *	NOTE(review): extract is incomplete (early-return path, some
 *	declarations and closing braces not visible).
 */
417 ql_capture_drvr_state(qla_host_t *ha)
419 uint8_t *state_buffer;
421 uint32_t drvr_state_size;
422 qla_drvr_state_hdr_t *hdr;
426 drvr_state_size = ql_drvr_state_size(ha);
428 state_buffer = ha->hw.drvr_state;
/* No snapshot buffer allocated: nothing to capture into. */
430 if (state_buffer == NULL)
433 bzero(state_buffer, drvr_state_size);
/* Header: version triple marks the buffer as "captured" (see
 * ql_drvr_state(), which tests drvr_version_major). */
435 hdr = (qla_drvr_state_hdr_t *)state_buffer;
437 hdr->drvr_version_major = QLA_VERSION_MAJOR;
438 hdr->drvr_version_minor = QLA_VERSION_MINOR;
439 hdr->drvr_version_build = QLA_VERSION_BUILD;
441 bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);
/* Link / module / coalescing status snapshot. */
443 hdr->link_speed = ha->hw.link_speed;
444 hdr->cable_length = ha->hw.cable_length;
445 hdr->cable_oui = ha->hw.cable_oui;
446 hdr->link_up = ha->hw.link_up;
447 hdr->module_type = ha->hw.module_type;
448 hdr->link_faults = ha->hw.link_faults;
449 hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
450 hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;
/* Section 1: per-ring tx software state, right after the header. */
452 size = sizeof (qla_drvr_state_hdr_t);
453 hdr->tx_state_offset = QL_ALIGN(size, 64);
455 ptr = state_buffer + hdr->tx_state_offset;
457 ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);
/* Section 2: per-ring rx software state. */
459 size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
460 hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
461 ptr = state_buffer + hdr->rx_state_offset;
463 ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);
/* Section 3: per-ring sds software state. */
465 size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
466 hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
467 ptr = state_buffer + hdr->sds_state_offset;
469 ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);
/* Section 4: raw tx descriptor rings (contiguous dma buffer). */
471 size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
472 hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
473 ptr = state_buffer + hdr->txr_offset;
475 hdr->num_tx_rings = ha->hw.num_tx_rings;
476 hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
477 hdr->txr_entries = NUM_TX_DESCRIPTORS;
479 size = hdr->num_tx_rings * hdr->txr_size;
480 bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);
/* Section 5: raw rx descriptor rings, one bcopy per ring. */
482 hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
483 ptr = state_buffer + hdr->rxr_offset;
485 hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
486 hdr->rxr_entries = NUM_RX_DESCRIPTORS;
487 hdr->num_rx_rings = ha->hw.num_rds_rings;
489 for (i = 0; i < ha->hw.num_rds_rings; i++) {
490 bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
491 ptr += hdr->rxr_size;
/* Section 6: raw status (sds) rings, one bcopy per ring. */
494 size = hdr->rxr_size * hdr->num_rx_rings;
495 hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
496 hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
497 hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
498 hdr->num_sds_rings = ha->hw.num_sds_rings;
500 ptr = state_buffer + hdr->sds_offset;
501 for (i = 0; i < ha->hw.num_sds_rings; i++) {
502 bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr, hdr->sds_ring_size);
503 ptr += hdr->sds_ring_size;
509 ql_alloc_drvr_state_buffer(qla_host_t *ha)
511 uint32_t drvr_state_size;
513 drvr_state_size = ql_drvr_state_size(ha);
515 ha->hw.drvr_state = malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);
521 ql_free_drvr_state_buffer(qla_host_t *ha)
523 if (ha->hw.drvr_state != NULL)
524 free(ha->hw.drvr_state, M_QLA83XXBUF);