/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File: ql_ioctl.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_glbl.h"
#include "ql_ioctl.h"
#include "ql_ver.h"
#include "ql_dbg.h"

static int ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log);
static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
static uint32_t ql_drvr_state_size(qla_host_t *ha);
static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
                struct thread *td);

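/*
 * Character device entry points. Only the ioctl entry point is
 * implemented; the device node exists solely so management tools can
 * issue the QLA_* commands handled by ql_eioctl() below.
 */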
static struct cdevsw qla_cdevsw = {
        .d_version = D_VERSION,
        .d_ioctl = ql_eioctl,
        .d_name = "qlcnic",
};

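/*
 * Create the control device node for this adapter. The node is named
 * after the network interface (/dev/<ifname>), is owned by root:wheel,
 * and is accessible only to root (mode 0600).
 */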
int
ql_make_cdev(qla_host_t *ha)
{
        ha->ioctl_dev = make_dev(&qla_cdevsw,
                                ha->ifp->if_dunit,
                                UID_ROOT,
                                GID_WHEEL,
                                0600,
                                "%s",
                                if_name(ha->ifp));

        if (ha->ioctl_dev == NULL)
                return (-1);

        ha->ioctl_dev->si_drv1 = ha;

        return (0);
}

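/*
 * Sketch of how a userland tool might read a register through this
 * node (assumes the QLA_RDWR_REG command and the qla_reg_val_t layout
 * exported by ql_ioctl.h; the interface name "ql0" and the register
 * offset are only examples, and the caller must be root):
 *
 *      qla_reg_val_t rv = { .rd = 1, .direct = 1, .reg = <offset> };
 *      int fd = open("/dev/ql0", O_RDWR);
 *      if (fd >= 0 && ioctl(fd, QLA_RDWR_REG, &rv) == 0)
 *              printf("reg 0x%x = 0x%x\n", rv.reg, rv.val);
 */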
void
ql_del_cdev(qla_host_t *ha)
{
        if (ha->ioctl_dev != NULL)
                destroy_dev(ha->ioctl_dev);
        return;
}

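/*
 * ioctl entry point for the control device. Commands cover register
 * access (QLA_RDWR_REG), flash read/write/erase, off-chip memory
 * access (QLA_RDWR_MS_MEM), firmware minidump retrieval
 * (QLA_RD_FW_DUMP_SIZE/QLA_RD_FW_DUMP), driver state capture
 * (QLA_RD_DRVR_STATE), the slowpath log (QLA_RD_SLOWPATH_LOG) and
 * PCI ids (QLA_RD_PCI_IDS). Unrecognized commands are silently
 * accepted and return 0.
 */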
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
        struct thread *td)
{
        qla_host_t *ha;
        int rval = 0;
        device_t pci_dev;
        struct ifnet *ifp;
        int count;

        q80_offchip_mem_val_t val;
        qla_rd_pci_ids_t *pci_ids;
        qla_rd_fw_dump_t *fw_dump;
        union {
                qla_reg_val_t *rv;
                qla_rd_flash_t *rdf;
                qla_wr_flash_t *wrf;
                qla_erase_flash_t *erf;
                qla_offchip_mem_val_t *mem;
        } u;

        if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
                return (ENXIO);

        pci_dev = ha->pci_dev;

        switch (cmd) {

        case QLA_RDWR_REG:

                u.rv = (qla_reg_val_t *)data;

                if (u.rv->direct) {
                        if (u.rv->rd) {
                                u.rv->val = READ_REG32(ha, u.rv->reg);
                        } else {
                                WRITE_REG32(ha, u.rv->reg, u.rv->val);
                        }
                } else {
                        if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
                                u.rv->rd)))
                                rval = ENXIO;
                }
                break;

        case QLA_RD_FLASH:

                if (!ha->hw.flags.fdt_valid) {
                        rval = EIO;
                        break;
                }

                u.rdf = (qla_rd_flash_t *)data;
                if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
                        rval = ENXIO;
                break;

        case QLA_WR_FLASH:

                ifp = ha->ifp;

                if (ifp == NULL) {
                        rval = ENXIO;
                        break;
                }

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        rval = ENXIO;
                        break;
                }

                if (!ha->hw.flags.fdt_valid) {
                        rval = EIO;
                        break;
                }

                u.wrf = (qla_wr_flash_t *)data;
                if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
                        u.wrf->buffer))) {
                        printf("flash write failed[%d]\n", rval);
                        rval = ENXIO;
                }
                break;

        case QLA_ERASE_FLASH:

                ifp = ha->ifp;

                if (ifp == NULL) {
                        rval = ENXIO;
                        break;
                }

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        rval = ENXIO;
                        break;
                }

                if (!ha->hw.flags.fdt_valid) {
                        rval = EIO;
                        break;
                }

                u.erf = (qla_erase_flash_t *)data;
                if ((rval = ql_erase_flash(ha, u.erf->off,
                        u.erf->size))) {
                        printf("flash erase failed[%d]\n", rval);
                        rval = ENXIO;
                }
                break;

        case QLA_RDWR_MS_MEM:
                u.mem = (qla_offchip_mem_val_t *)data;

                if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
                        u.mem->rd)))
                        rval = ENXIO;
                else {
                        u.mem->data_lo = val.data_lo;
                        u.mem->data_hi = val.data_hi;
                        u.mem->data_ulo = val.data_ulo;
                        u.mem->data_uhi = val.data_uhi;
                }

                break;

        case QLA_RD_FW_DUMP_SIZE:

                if (ha->hw.mdump_init == 0) {
                        rval = EINVAL;
                        break;
                }

                fw_dump = (qla_rd_fw_dump_t *)data;
                fw_dump->minidump_size = ha->hw.mdump_buffer_size +
                                                ha->hw.mdump_template_size;
                fw_dump->pci_func = ha->pci_func;

                break;

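        /*
         * QLA_RD_FW_DUMP copies the firmware minidump (template followed
         * by capture buffer) out to the caller. The user buffer must be
         * exactly the size reported by QLA_RD_FW_DUMP_SIZE, and the dump
         * may only be taken through the function on Port0 (even PCI
         * function number).
         */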
        case QLA_RD_FW_DUMP:

                if (ha->hw.mdump_init == 0) {
                        device_printf(pci_dev, "%s: minidump not initialized\n", __func__);
                        rval = EINVAL;
                        break;
                }

                fw_dump = (qla_rd_fw_dump_t *)data;

                if ((fw_dump->minidump == NULL) ||
                        (fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
                                ha->hw.mdump_template_size))) {
                        device_printf(pci_dev,
                                "%s: minidump buffer [%p] size = [%d, %d] invalid\n", __func__,
                                fw_dump->minidump, fw_dump->minidump_size,
                                (ha->hw.mdump_buffer_size + ha->hw.mdump_template_size));
                        rval = EINVAL;
                        break;
                }

                if ((ha->pci_func & 0x1)) {
                        device_printf(pci_dev, "%s: minidump allowed only on Port0\n", __func__);
                        rval = ENXIO;
                        break;
                }

                fw_dump->saved = 1;

                if (ha->offline) {
                        if (ha->enable_minidump)
                                ql_minidump(ha);

                        fw_dump->saved = 0;
                        fw_dump->usec_ts = ha->hw.mdump_usec_ts;

                        if (!ha->hw.mdump_done) {
                                device_printf(pci_dev,
                                        "%s: port offline minidump failed\n", __func__);
                                rval = ENXIO;
                                break;
                        }
                } else {
                        if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                                if (!ha->hw.mdump_done) {
                                        fw_dump->saved = 0;
                                        QL_INITIATE_RECOVERY(ha);
                                        device_printf(pci_dev, "%s: recovery initiated"
                                                " to trigger minidump\n",
                                                __func__);
                                }
                                QLA_UNLOCK(ha, __func__);
                        } else {
                                device_printf(pci_dev, "%s: QLA_LOCK() failed0\n", __func__);
                                rval = ENXIO;
                                break;
                        }

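                        /*
                         * If no dump was available, recovery was initiated
                         * above; poll in 100ms steps, for up to
                         * QLNX_DUMP_WAIT_SECS seconds, for the recovery
                         * path to complete the minidump.
                         */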
#define QLNX_DUMP_WAIT_SECS     30

                        count = QLNX_DUMP_WAIT_SECS * 1000;

                        while (count) {
                                if (ha->hw.mdump_done)
                                        break;
                                qla_mdelay(__func__, 100);
                                count -= 100;
                        }

                        if (!ha->hw.mdump_done) {
                                device_printf(pci_dev,
                                        "%s: port not offline minidump failed\n", __func__);
                                rval = ENXIO;
                                break;
                        }
                        fw_dump->usec_ts = ha->hw.mdump_usec_ts;

                        if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) {
                                ha->hw.mdump_done = 0;
                                QLA_UNLOCK(ha, __func__);
                        } else {
                                device_printf(pci_dev, "%s: QLA_LOCK() failed1\n", __func__);
                                rval = ENXIO;
                                break;
                        }
                }

                if ((rval = copyout(ha->hw.mdump_template,
                        fw_dump->minidump, ha->hw.mdump_template_size))) {
                        device_printf(pci_dev, "%s: template copyout failed\n", __func__);
                        rval = ENXIO;
                        break;
                }

                if ((rval = copyout(ha->hw.mdump_buffer,
                                ((uint8_t *)fw_dump->minidump +
                                        ha->hw.mdump_template_size),
                                ha->hw.mdump_buffer_size))) {
                        device_printf(pci_dev, "%s: minidump copyout failed\n", __func__);
                        rval = ENXIO;
                }
                break;

        case QLA_RD_DRVR_STATE:
                rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
                break;

        case QLA_RD_SLOWPATH_LOG:
                rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
                break;

        case QLA_RD_PCI_IDS:
                pci_ids = (qla_rd_pci_ids_t *)data;
                pci_ids->ven_id = pci_get_vendor(pci_dev);
                pci_ids->dev_id = pci_get_device(pci_dev);
                pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
                pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
                pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
                break;

        default:
                break;
        }

        return (rval);
}

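/*
 * QLA_RD_DRVR_STATE handler. Two-call protocol: if state->buffer is
 * NULL, only the required buffer size is returned; otherwise the
 * current driver state is captured and copied out, and the staging
 * buffer is then zeroed so the next call captures a fresh snapshot.
 */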
static int
ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
{
        int rval = 0;
        uint32_t drvr_state_size;

        drvr_state_size = ql_drvr_state_size(ha);

        if (state->buffer == NULL) {
                state->size = drvr_state_size;
                return (0);
        }

        if (state->size < drvr_state_size)
                return (ENXIO);

        if (ha->hw.drvr_state == NULL)
                return (ENOMEM);

        ql_capture_drvr_state(ha);

        rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);

        bzero(ha->hw.drvr_state, drvr_state_size);

        return (rval);
}

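/*
 * Compute the total size of the driver state snapshot: the header,
 * the per-ring tx/rx/sds software state, and raw copies of the tx,
 * rx and status descriptor rings, with each section padded to a
 * 64-byte boundary.
 */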
static uint32_t
ql_drvr_state_size(qla_host_t *ha)
{
        uint32_t drvr_state_size;
        uint32_t size;

        size = sizeof (qla_drvr_state_hdr_t);
        drvr_state_size = QL_ALIGN(size, 64);

        size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
        drvr_state_size += QL_ALIGN(size, 64);

        size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
        drvr_state_size += QL_ALIGN(size, 64);

        size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
        drvr_state_size += QL_ALIGN(size, 64);

        size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
        drvr_state_size += QL_ALIGN(size, 64);

        size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS * ha->hw.num_rds_rings;
        drvr_state_size += QL_ALIGN(size, 64);

        size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
                        ha->hw.num_sds_rings;
        drvr_state_size += QL_ALIGN(size, 64);

        return (drvr_state_size);
}

static void
ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
{
        int i;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
                tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
                tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
                tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
                tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
                tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
                tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
                tx_state++;
        }
        return;
}

static void
ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
{
        int i;

        for (i = 0; i < ha->hw.num_rds_rings; i++) {
                rx_state->prod_std = ha->hw.rds[i].prod_std;
                rx_state->rx_next = ha->hw.rds[i].rx_next;
                rx_state++;
        }
        return;
}

static void
ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
{
        int i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
                sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
                sds_state++;
        }
        return;
}

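/*
 * Fill the pre-allocated drvr_state buffer with a snapshot of the
 * driver's current state, laid out to match ql_drvr_state_size():
 * header, tx/rx/sds ring software state, then raw copies of the
 * descriptor rings. If the buffer already holds a capture (non-zero
 * version in the header), it is left intact and only marked saved.
 */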
void
ql_capture_drvr_state(qla_host_t *ha)
{
        uint8_t *state_buffer;
        uint8_t *ptr;
        qla_drvr_state_hdr_t *hdr;
        uint32_t size;
        int i;

        state_buffer = ha->hw.drvr_state;

        if (state_buffer == NULL)
                return;

        hdr = (qla_drvr_state_hdr_t *)state_buffer;

        hdr->saved = 0;

        if (hdr->drvr_version_major) {
                hdr->saved = 1;
                return;
        }

        hdr->usec_ts = qla_get_usec_timestamp();

        hdr->drvr_version_major = QLA_VERSION_MAJOR;
        hdr->drvr_version_minor = QLA_VERSION_MINOR;
        hdr->drvr_version_build = QLA_VERSION_BUILD;

        bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);

        hdr->link_speed = ha->hw.link_speed;
        hdr->cable_length = ha->hw.cable_length;
        hdr->cable_oui = ha->hw.cable_oui;
        hdr->link_up = ha->hw.link_up;
        hdr->module_type = ha->hw.module_type;
        hdr->link_faults = ha->hw.link_faults;
        hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
        hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;

        size = sizeof (qla_drvr_state_hdr_t);
        hdr->tx_state_offset = QL_ALIGN(size, 64);

        ptr = state_buffer + hdr->tx_state_offset;

        ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);

        size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
        hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
        ptr = state_buffer + hdr->rx_state_offset;

        ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);

        size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
        hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
        ptr = state_buffer + hdr->sds_state_offset;

        ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);

        size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
        hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
        ptr = state_buffer + hdr->txr_offset;

        hdr->num_tx_rings = ha->hw.num_tx_rings;
        hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
        hdr->txr_entries = NUM_TX_DESCRIPTORS;

        size = hdr->num_tx_rings * hdr->txr_size;
        bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);

        hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
        ptr = state_buffer + hdr->rxr_offset;

        hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
        hdr->rxr_entries = NUM_RX_DESCRIPTORS;
        hdr->num_rx_rings = ha->hw.num_rds_rings;

        for (i = 0; i < ha->hw.num_rds_rings; i++) {
                bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
                ptr += hdr->rxr_size;
        }

        size = hdr->rxr_size * hdr->num_rx_rings;
        hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
        hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
        hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
        hdr->num_sds_rings = ha->hw.num_sds_rings;

        ptr = state_buffer + hdr->sds_offset;
        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr, hdr->sds_ring_size);
                ptr += hdr->sds_ring_size;
        }
        return;
}

void
ql_alloc_drvr_state_buffer(qla_host_t *ha)
{
        uint32_t drvr_state_size;

        drvr_state_size = ql_drvr_state_size(ha);

        ha->hw.drvr_state = malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);

        if (ha->hw.drvr_state != NULL)
                bzero(ha->hw.drvr_state, drvr_state_size);

        return;
}

void
ql_free_drvr_state_buffer(qla_host_t *ha)
{
        if (ha->hw.drvr_state != NULL)
                free(ha->hw.drvr_state, M_QLA83XXBUF);
        return;
}

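/*
 * Append one entry to the slowpath log, a fixed-size circular buffer
 * of qla_sp_log_entry_t records. The index wraps via a power-of-two
 * mask, so NUM_LOG_ENTRIES must be a power of two. Each entry records
 * a format-string index, a microsecond timestamp and up to five
 * parameters; logging is skipped once sp_log_stop is set.
 */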
void
ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params,
        uint32_t param0, uint32_t param1, uint32_t param2, uint32_t param3,
        uint32_t param4)
{
        qla_sp_log_entry_t *sp_e, *sp_log;

        if (((sp_log = ha->hw.sp_log) == NULL) || ha->hw.sp_log_stop)
                return;

        mtx_lock(&ha->sp_log_lock);

        sp_e = &sp_log[ha->hw.sp_log_index];

        bzero(sp_e, sizeof (qla_sp_log_entry_t));

        sp_e->fmtstr_idx = fmtstr_idx;
        sp_e->num_params = num_params;

        sp_e->usec_ts = qla_get_usec_timestamp();

        sp_e->params[0] = param0;
        sp_e->params[1] = param1;
        sp_e->params[2] = param2;
        sp_e->params[3] = param3;
        sp_e->params[4] = param4;

        ha->hw.sp_log_index = (ha->hw.sp_log_index + 1) & (NUM_LOG_ENTRIES - 1);

        if (ha->hw.sp_log_num_entries < NUM_LOG_ENTRIES)
                ha->hw.sp_log_num_entries++;

        mtx_unlock(&ha->sp_log_lock);

        return;
}

void
ql_alloc_sp_log_buffer(qla_host_t *ha)
{
        uint32_t size;

        size = (sizeof(qla_sp_log_entry_t)) * NUM_LOG_ENTRIES;

        ha->hw.sp_log = malloc(size, M_QLA83XXBUF, M_NOWAIT);

        if (ha->hw.sp_log != NULL)
                bzero(ha->hw.sp_log, size);

        ha->hw.sp_log_index = 0;
        ha->hw.sp_log_num_entries = 0;

        return;
}

void
ql_free_sp_log_buffer(qla_host_t *ha)
{
        if (ha->hw.sp_log != NULL)
                free(ha->hw.sp_log, M_QLA83XXBUF);
        return;
}

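/*
 * QLA_RD_SLOWPATH_LOG handler: copy the entire slowpath log buffer out
 * to the caller, along with the next write index and the number of
 * valid entries, under sp_log_lock so that a concurrent ql_sp_log()
 * cannot modify an entry mid-copy.
 */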
static int
ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log)
{
        int rval = 0;
        uint32_t size;

        if ((ha->hw.sp_log == NULL) || (log->buffer == NULL))
                return (EINVAL);

        size = (sizeof(qla_sp_log_entry_t) * NUM_LOG_ENTRIES);

        mtx_lock(&ha->sp_log_lock);

        rval = copyout(ha->hw.sp_log, log->buffer, size);

        if (!rval) {
                log->next_idx = ha->hw.sp_log_index;
                log->num_entries = ha->hw.sp_log_num_entries;
        }
        device_printf(ha->pci_dev,
                "%s: exit [rval = %d][%p, next_idx = %d, %d entries, %d bytes]\n",
                __func__, rval, log->buffer, log->next_idx, log->num_entries, size);
        mtx_unlock(&ha->sp_log_lock);

        return (rval);
}