2 * Copyright (c) 2013-2016 Qlogic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
29 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
39 #include "ql_inline.h"
45 static int ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log);
46 static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
47 static uint32_t ql_drvr_state_size(qla_host_t *ha);
48 static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
51 static struct cdevsw qla_cdevsw = {
52 .d_version = D_VERSION,
58 ql_make_cdev(qla_host_t *ha)
60 ha->ioctl_dev = make_dev(&qla_cdevsw,
68 if (ha->ioctl_dev == NULL)
71 ha->ioctl_dev->si_drv1 = ha;
77 ql_del_cdev(qla_host_t *ha)
79 if (ha->ioctl_dev != NULL)
80 destroy_dev(ha->ioctl_dev);
/*
 * ql_eioctl
 *	ioctl entry point for the management cdev (qla_cdevsw.d_ioctl).
 *	Dispatches on 'cmd' to: register read/write (direct and indirect),
 *	flash read/write/erase, off-chip (MS) memory access, firmware
 *	minidump size query and retrieval, driver-state snapshot, slowpath
 *	log retrieval, and PCI-id queries.
 *
 * NOTE(review): this chunk has lines elided by extraction (each line
 * carries the original file's line number); several case labels, braces
 * and returns are not visible here.  Comments below describe only what
 * the visible lines establish.
 */
85 ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
/* per-command views of the ioctl payload 'data' */
94 q80_offchip_mem_val_t val;
95 qla_rd_pci_ids_t *pci_ids;
96 qla_rd_fw_dump_t *fw_dump;
101 qla_erase_flash_t *erf;
102 qla_offchip_mem_val_t *mem;
/* recover the softc stashed in si_drv1 by ql_make_cdev() */
106 if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
109 pci_dev= ha->pci_dev;
/* register access: direct 32-bit read/write, then indirect access */
115 u.rv = (qla_reg_val_t *)data;
119 u.rv->val = READ_REG32(ha, u.rv->reg);
121 WRITE_REG32(ha, u.rv->reg, u.rv->val);
124 if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
/* flash read: requires a valid flash descriptor table (FDT) */
132 if (!ha->hw.flags.fdt_valid) {
137 u.rdf = (qla_rd_flash_t *)data;
138 if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
/* flash write: refused while the interface is running; needs FDT */
151 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
156 if (!ha->hw.flags.fdt_valid) {
161 u.wrf = (qla_wr_flash_t *)data;
162 if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
164 printf("flash write failed[%d]\n", rval);
169 case QLA_ERASE_FLASH:
/* flash erase: same interface-down and FDT preconditions as write */
178 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
183 if (!ha->hw.flags.fdt_valid) {
188 u.erf = (qla_erase_flash_t *)data;
189 if ((rval = ql_erase_flash(ha, u.erf->off,
191 printf("flash erase failed[%d]\n", rval);
196 case QLA_RDWR_MS_MEM:
/* off-chip memory access staged through local 'val', copied back out */
197 u.mem = (qla_offchip_mem_val_t *)data;
199 if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
203 u.mem->data_lo = val.data_lo;
204 u.mem->data_hi = val.data_hi;
205 u.mem->data_ulo = val.data_ulo;
206 u.mem->data_uhi = val.data_uhi;
211 case QLA_RD_FW_DUMP_SIZE:
/* size query: required buffer is template + dump data */
213 if (ha->hw.mdump_init == 0) {
218 fw_dump = (qla_rd_fw_dump_t *)data;
219 fw_dump->minidump_size = ha->hw.mdump_buffer_size +
220 ha->hw.mdump_template_size;
221 fw_dump->pci_func = ha->pci_func;
/* minidump retrieval: minidump support must be initialized */
227 if (ha->hw.mdump_init == 0) {
228 device_printf(pci_dev, "%s: minidump not initialized\n", __func__);
233 fw_dump = (qla_rd_fw_dump_t *)data;
/* caller's buffer must be exactly template + data bytes */
235 if ((fw_dump->minidump == NULL) ||
236 (fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
237 ha->hw.mdump_template_size))) {
238 device_printf(pci_dev,
239 "%s: minidump buffer [%p] size = [%d, %d] invalid\n", __func__,
240 fw_dump->minidump, fw_dump->minidump_size,
241 (ha->hw.mdump_buffer_size + ha->hw.mdump_template_size));
/* odd PCI functions are rejected; dump is taken on Port0 only */
246 if ((ha->pci_func & 0x1)) {
247 device_printf(pci_dev, "%s: mindump allowed only on Port0\n", __func__);
256 if (ha->enable_minidump)
260 fw_dump->usec_ts = ha->hw.mdump_usec_ts;
262 if (!ha->hw.mdump_done) {
263 device_printf(pci_dev,
264 "%s: port offline minidump failed\n", __func__);
/*
 * No dump captured yet: initiate recovery under the driver lock so the
 * recovery path produces a minidump, then poll for completion below.
 */
270 #define QLA_LOCK_MDUMP_MS_TIMEOUT (QLA_LOCK_DEFAULT_MS_TIMEOUT * 5)
271 if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
272 if (!ha->hw.mdump_done) {
274 QL_INITIATE_RECOVERY(ha);
275 device_printf(pci_dev, "%s: recovery initiated "
276 " to trigger minidump\n",
279 QLA_UNLOCK(ha, __func__);
281 device_printf(pci_dev, "%s: QLA_LOCK() failed0\n", __func__);
/* poll mdump_done every 100ms, up to 30 seconds */
286 #define QLNX_DUMP_WAIT_SECS 30
288 count = QLNX_DUMP_WAIT_SECS * 1000;
291 if (ha->hw.mdump_done)
293 qla_mdelay(__func__, 100);
297 if (!ha->hw.mdump_done) {
298 device_printf(pci_dev,
299 "%s: port not offline minidump failed\n", __func__);
303 fw_dump->usec_ts = ha->hw.mdump_usec_ts;
/* clear mdump_done under the lock so a later request re-arms capture */
305 if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
306 ha->hw.mdump_done = 0;
307 QLA_UNLOCK(ha, __func__);
309 device_printf(pci_dev, "%s: QLA_LOCK() failed1\n", __func__);
/* copy the template first, then the dump data immediately after it */
315 if ((rval = copyout(ha->hw.mdump_template,
316 fw_dump->minidump, ha->hw.mdump_template_size))) {
317 device_printf(pci_dev, "%s: template copyout failed\n", __func__);
322 if ((rval = copyout(ha->hw.mdump_buffer,
323 ((uint8_t *)fw_dump->minidump +
324 ha->hw.mdump_template_size),
325 ha->hw.mdump_buffer_size))) {
326 device_printf(pci_dev, "%s: minidump copyout failed\n", __func__);
/* driver-state and slowpath-log commands delegate to static helpers */
331 case QLA_RD_DRVR_STATE:
332 rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
335 case QLA_RD_SLOWPATH_LOG:
336 rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
/* PCI identification of this function */
340 pci_ids = (qla_rd_pci_ids_t *)data;
341 pci_ids->ven_id = pci_get_vendor(pci_dev);
342 pci_ids->dev_id = pci_get_device(pci_dev);
343 pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
344 pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
345 pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
358 ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
361 uint32_t drvr_state_size;
363 drvr_state_size = ql_drvr_state_size(ha);
365 if (state->buffer == NULL) {
366 state->size = drvr_state_size;
370 if (state->size < drvr_state_size)
373 if (ha->hw.drvr_state == NULL)
376 ql_capture_drvr_state(ha);
378 rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);
380 bzero(ha->hw.drvr_state, drvr_state_size);
386 ql_drvr_state_size(qla_host_t *ha)
388 uint32_t drvr_state_size;
391 size = sizeof (qla_drvr_state_hdr_t);
392 drvr_state_size = QL_ALIGN(size, 64);
394 size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
395 drvr_state_size += QL_ALIGN(size, 64);
397 size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
398 drvr_state_size += QL_ALIGN(size, 64);
400 size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
401 drvr_state_size += QL_ALIGN(size, 64);
403 size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
404 drvr_state_size += QL_ALIGN(size, 64);
406 size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS * ha->hw.num_rds_rings;
407 drvr_state_size += QL_ALIGN(size, 64);
409 size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
410 ha->hw.num_sds_rings;
411 drvr_state_size += QL_ALIGN(size, 64);
413 return (drvr_state_size);
417 ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
421 for (i = 0; i < ha->hw.num_tx_rings; i++) {
422 tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
423 tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
424 tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
425 tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
426 tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
427 tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
428 tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
435 ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
439 for (i = 0; i < ha->hw.num_rds_rings; i++) {
440 rx_state->prod_std = ha->hw.rds[i].prod_std;
441 rx_state->rx_next = ha->hw.rds[i].rx_next;
448 ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
452 for (i = 0; i < ha->hw.num_sds_rings; i++) {
453 sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
454 sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
/*
 * ql_capture_drvr_state
 *	Fill the pre-allocated drvr_state staging buffer with a snapshot of
 *	driver/link state: a header, per-ring tx/rx/sds software state, and
 *	raw copies of the tx, rx and status descriptor rings.  Section
 *	offsets are recorded in the header as they are laid out; each
 *	section is aligned with QL_ALIGN(..., 64), mirroring
 *	ql_drvr_state_size().
 *
 * NOTE(review): lines were elided from this chunk (the leading decimal on
 * each line is the original file's line number) -- the body of the
 * early-out at original line 478 and several declarations are not
 * visible here.
 */
461 ql_capture_drvr_state(qla_host_t *ha)
463 uint8_t *state_buffer;
465 qla_drvr_state_hdr_t *hdr;
/* nothing to capture into if the buffer was never allocated */
469 state_buffer = ha->hw.drvr_state;
471 if (state_buffer == NULL)
474 hdr = (qla_drvr_state_hdr_t *)state_buffer;
/* a nonzero major version marks an already-populated snapshot */
478 if (hdr->drvr_version_major) {
/* header: timestamp, driver version, MAC and link/cable status */
483 hdr->usec_ts = qla_get_usec_timestamp();
485 hdr->drvr_version_major = QLA_VERSION_MAJOR;
486 hdr->drvr_version_minor = QLA_VERSION_MINOR;
487 hdr->drvr_version_build = QLA_VERSION_BUILD;
489 bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);
491 hdr->link_speed = ha->hw.link_speed;
492 hdr->cable_length = ha->hw.cable_length;
493 hdr->cable_oui = ha->hw.cable_oui;
494 hdr->link_up = ha->hw.link_up;
495 hdr->module_type = ha->hw.module_type;
496 hdr->link_faults = ha->hw.link_faults;
497 hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
498 hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;
/* tx software state immediately after the (aligned) header */
500 size = sizeof (qla_drvr_state_hdr_t);
501 hdr->tx_state_offset = QL_ALIGN(size, 64);
503 ptr = state_buffer + hdr->tx_state_offset;
505 ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);
/* rx software state follows the tx section */
507 size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
508 hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
509 ptr = state_buffer + hdr->rx_state_offset;
511 ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);
/* sds software state follows the rx section */
513 size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
514 hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
515 ptr = state_buffer + hdr->sds_state_offset;
517 ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);
/* raw tx descriptor rings: all rings are one contiguous DMA buffer */
519 size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
520 hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
521 ptr = state_buffer + hdr->txr_offset;
523 hdr->num_tx_rings = ha->hw.num_tx_rings;
524 hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
525 hdr->txr_entries = NUM_TX_DESCRIPTORS;
527 size = hdr->num_tx_rings * hdr->txr_size;
528 bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);
/* raw rx descriptor rings: copied ring by ring */
530 hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
531 ptr = state_buffer + hdr->rxr_offset;
533 hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
534 hdr->rxr_entries = NUM_RX_DESCRIPTORS;
535 hdr->num_rx_rings = ha->hw.num_rds_rings;
537 for (i = 0; i < ha->hw.num_rds_rings; i++) {
538 bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
539 ptr += hdr->rxr_size;
/* raw status descriptor rings: copied ring by ring */
542 size = hdr->rxr_size * hdr->num_rx_rings;
543 hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
544 hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
545 hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
546 hdr->num_sds_rings = ha->hw.num_sds_rings;
548 ptr = state_buffer + hdr->sds_offset;
549 for (i = 0; i < ha->hw.num_sds_rings; i++) {
550 bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr, hdr->sds_ring_size);
551 ptr += hdr->sds_ring_size;
557 ql_alloc_drvr_state_buffer(qla_host_t *ha)
559 uint32_t drvr_state_size;
561 drvr_state_size = ql_drvr_state_size(ha);
563 ha->hw.drvr_state = malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);
565 if (ha->hw.drvr_state != NULL)
566 bzero(ha->hw.drvr_state, drvr_state_size);
572 ql_free_drvr_state_buffer(qla_host_t *ha)
574 if (ha->hw.drvr_state != NULL)
575 free(ha->hw.drvr_state, M_QLA83XXBUF);
580 ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params,
581 uint32_t param0, uint32_t param1, uint32_t param2, uint32_t param3,
584 qla_sp_log_entry_t *sp_e, *sp_log;
586 if (((sp_log = ha->hw.sp_log) == NULL) || ha->hw.sp_log_stop)
589 mtx_lock(&ha->sp_log_lock);
591 sp_e = &sp_log[ha->hw.sp_log_index];
593 bzero(sp_e, sizeof (qla_sp_log_entry_t));
595 sp_e->fmtstr_idx = fmtstr_idx;
596 sp_e->num_params = num_params;
598 sp_e->usec_ts = qla_get_usec_timestamp();
600 sp_e->params[0] = param0;
601 sp_e->params[1] = param1;
602 sp_e->params[2] = param2;
603 sp_e->params[3] = param3;
604 sp_e->params[4] = param4;
606 ha->hw.sp_log_index = (ha->hw.sp_log_index + 1) & (NUM_LOG_ENTRIES - 1);
608 if (ha->hw.sp_log_num_entries < NUM_LOG_ENTRIES)
609 ha->hw.sp_log_num_entries++;
611 mtx_unlock(&ha->sp_log_lock);
617 ql_alloc_sp_log_buffer(qla_host_t *ha)
621 size = (sizeof(qla_sp_log_entry_t)) * NUM_LOG_ENTRIES;
623 ha->hw.sp_log = malloc(size, M_QLA83XXBUF, M_NOWAIT);
625 if (ha->hw.sp_log != NULL)
626 bzero(ha->hw.sp_log, size);
628 ha->hw.sp_log_index = 0;
629 ha->hw.sp_log_num_entries = 0;
635 ql_free_sp_log_buffer(qla_host_t *ha)
637 if (ha->hw.sp_log != NULL)
638 free(ha->hw.sp_log, M_QLA83XXBUF);
643 ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log)
648 if ((ha->hw.sp_log == NULL) || (log->buffer == NULL))
651 size = (sizeof(qla_sp_log_entry_t) * NUM_LOG_ENTRIES);
653 mtx_lock(&ha->sp_log_lock);
655 rval = copyout(ha->hw.sp_log, log->buffer, size);
658 log->next_idx = ha->hw.sp_log_index;
659 log->num_entries = ha->hw.sp_log_num_entries;
661 device_printf(ha->pci_dev,
662 "%s: exit [rval = %d][%p, next_idx = %d, %d entries, %d bytes]\n",
663 __func__, rval, log->buffer, log->next_idx, log->num_entries, size);
664 mtx_unlock(&ha->sp_log_lock);