/*
 * Copyright (c) 2013-2016 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * File: ql_ioctl.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ql_os.h"
#include "ql_hw.h"
#include "ql_def.h"
#include "ql_inline.h"
#include "ql_glbl.h"
#include "ql_ioctl.h"
#include "ql_ver.h"
#include "ql_dbg.h"

static int ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log);
static int ql_drvr_state(qla_host_t *ha, qla_driver_state_t *drvr_state);
static uint32_t ql_drvr_state_size(qla_host_t *ha);
static int ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
        struct thread *td);

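/*
 * Character-device switch for the per-interface control node. Only the
 * ioctl entry point is populated; all management requests funnel through
 * ql_eioctl() below.
 */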
static struct cdevsw qla_cdevsw = {
        .d_version = D_VERSION,
        .d_ioctl = ql_eioctl,
        .d_name = "qlcnic",
};

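/*
 * Userland reaches ql_eioctl() through the /dev node created in
 * ql_make_cdev() below; the node is named after the network interface.
 * A minimal sketch of a caller (assuming an interface named "ql0"; the
 * ioctl request codes and qla_rd_pci_ids_t come from ql_ioctl.h):
 *
 *      #include <sys/ioctl.h>
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include "ql_ioctl.h"
 *
 *      int
 *      main(void)
 *      {
 *              qla_rd_pci_ids_t ids;
 *              int fd = open("/dev/ql0", O_RDWR);
 *
 *              if (fd < 0 || ioctl(fd, QLA_RD_PCI_IDS, &ids) < 0)
 *                      return (1);
 *              printf("vendor 0x%x device 0x%x\n", ids.ven_id, ids.dev_id);
 *              return (0);
 *      }
 */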
int
ql_make_cdev(qla_host_t *ha)
{
        ha->ioctl_dev = make_dev(&qla_cdevsw,
                                ha->ifp->if_dunit,
                                UID_ROOT,
                                GID_WHEEL,
                                0600,
                                "%s",
                                if_name(ha->ifp));

        if (ha->ioctl_dev == NULL)
                return (-1);

        ha->ioctl_dev->si_drv1 = ha;

        return (0);
}

void
ql_del_cdev(qla_host_t *ha)
{
        if (ha->ioctl_dev != NULL)
                destroy_dev(ha->ioctl_dev);
        return;
}

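/*
 * ioctl dispatcher for the control device. 'data' is the request
 * structure already copied in/out by the generic ioctl layer; only the
 * minidump, driver-state, and slowpath-log paths copyout() through
 * user pointers embedded in those structures.
 */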
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
        struct thread *td)
{
        qla_host_t *ha;
        int rval = 0;
        device_t pci_dev;
        struct ifnet *ifp;
        int count;

        q80_offchip_mem_val_t val;
        qla_rd_pci_ids_t *pci_ids;
        qla_rd_fw_dump_t *fw_dump;
        union {
                qla_reg_val_t *rv;
                qla_rd_flash_t *rdf;
                qla_wr_flash_t *wrf;
                qla_erase_flash_t *erf;
                qla_offchip_mem_val_t *mem;
        } u;

        if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
                return (ENXIO);

        pci_dev = ha->pci_dev;

        switch (cmd) {

        case QLA_RDWR_REG:

                u.rv = (qla_reg_val_t *)data;

                if (u.rv->direct) {
                        if (u.rv->rd) {
                                u.rv->val = READ_REG32(ha, u.rv->reg);
                        } else {
                                WRITE_REG32(ha, u.rv->reg, u.rv->val);
                        }
                } else {
                        if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
                                u.rv->rd)))
                                rval = ENXIO;
                }
                break;

        case QLA_RD_FLASH:

                if (!ha->hw.flags.fdt_valid) {
                        rval = EIO;
                        break;
                }

                u.rdf = (qla_rd_flash_t *)data;
                if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
                        rval = ENXIO;
                break;

        case QLA_WR_FLASH:

                ifp = ha->ifp;

                if (ifp == NULL) {
                        rval = ENXIO;
                        break;
                }

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        rval = ENXIO;
                        break;
                }

                if (!ha->hw.flags.fdt_valid) {
                        rval = EIO;
                        break;
                }

                u.wrf = (qla_wr_flash_t *)data;
                if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
                        u.wrf->buffer))) {
                        printf("flash write failed[%d]\n", rval);
                        rval = ENXIO;
                }
                break;

        case QLA_ERASE_FLASH:

                ifp = ha->ifp;

                if (ifp == NULL) {
                        rval = ENXIO;
                        break;
                }

                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        rval = ENXIO;
                        break;
                }

                if (!ha->hw.flags.fdt_valid) {
                        rval = EIO;
                        break;
                }

                u.erf = (qla_erase_flash_t *)data;
                if ((rval = ql_erase_flash(ha, u.erf->off,
                        u.erf->size))) {
                        printf("flash erase failed[%d]\n", rval);
                        rval = ENXIO;
                }
                break;

        case QLA_RDWR_MS_MEM:
                u.mem = (qla_offchip_mem_val_t *)data;

                if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
                        u.mem->rd)))
                        rval = ENXIO;
                else {
                        u.mem->data_lo = val.data_lo;
                        u.mem->data_hi = val.data_hi;
                        u.mem->data_ulo = val.data_ulo;
                        u.mem->data_uhi = val.data_uhi;
                }

                break;

        case QLA_RD_FW_DUMP_SIZE:

                if (ha->hw.mdump_init == 0) {
                        rval = EINVAL;
                        break;
                }

                fw_dump = (qla_rd_fw_dump_t *)data;
                fw_dump->minidump_size = ha->hw.mdump_buffer_size +
                                                ha->hw.mdump_template_size;
                fw_dump->pci_func = ha->pci_func;

                break;

        case QLA_RD_FW_DUMP:

                if (ha->hw.mdump_init == 0) {
                        device_printf(pci_dev,
                                "%s: minidump not initialized\n", __func__);
                        rval = EINVAL;
                        break;
                }

                fw_dump = (qla_rd_fw_dump_t *)data;

                if ((fw_dump->minidump == NULL) ||
                        (fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
                                ha->hw.mdump_template_size))) {
                        device_printf(pci_dev,
                                "%s: minidump buffer [%p] size = [%d, %d] invalid\n",
                                __func__, fw_dump->minidump,
                                fw_dump->minidump_size,
                                (ha->hw.mdump_buffer_size +
                                        ha->hw.mdump_template_size));
                        rval = EINVAL;
                        break;
                }

                if ((ha->pci_func & 0x1)) {
                        device_printf(pci_dev,
                                "%s: minidump allowed only on Port0\n",
                                __func__);
                        rval = ENXIO;
                        break;
                }

                fw_dump->saved = 1;

                if (ha->offline) {

                        if (ha->enable_minidump)
                                ql_minidump(ha);

                        fw_dump->saved = 0;
                        fw_dump->usec_ts = ha->hw.mdump_usec_ts;

                        if (!ha->hw.mdump_done) {
                                device_printf(pci_dev,
                                        "%s: minidump failed (port offline)\n",
                                        __func__);
                                rval = ENXIO;
                                break;
                        }
                } else {

#define QLA_LOCK_MDUMP_MS_TIMEOUT (QLA_LOCK_DEFAULT_MS_TIMEOUT * 5)
                        if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT,
                                0) == 0) {
                                if (!ha->hw.mdump_done) {
                                        fw_dump->saved = 0;
                                        QL_INITIATE_RECOVERY(ha);
                                        device_printf(pci_dev,
                                                "%s: recovery initiated to "
                                                "trigger minidump\n",
                                                __func__);
                                }
                                QLA_UNLOCK(ha, __func__);
                        } else {
                                device_printf(pci_dev,
                                        "%s: QLA_LOCK() failed0\n", __func__);
                                rval = ENXIO;
                                break;
                        }

#define QLNX_DUMP_WAIT_SECS     30

                        count = QLNX_DUMP_WAIT_SECS * 1000;

                        while (count) {
                                if (ha->hw.mdump_done)
                                        break;
                                qla_mdelay(__func__, 100);
                                count -= 100;
                        }

                        if (!ha->hw.mdump_done) {
                                device_printf(pci_dev,
                                        "%s: minidump failed (port online)\n",
                                        __func__);
                                rval = ENXIO;
                                break;
                        }
                        fw_dump->usec_ts = ha->hw.mdump_usec_ts;

                        if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT,
                                0) == 0) {
                                ha->hw.mdump_done = 0;
                                QLA_UNLOCK(ha, __func__);
                        } else {
                                device_printf(pci_dev,
                                        "%s: QLA_LOCK() failed1\n", __func__);
                                rval = ENXIO;
                                break;
                        }
                }

                if ((rval = copyout(ha->hw.mdump_template,
                        fw_dump->minidump, ha->hw.mdump_template_size))) {
                        device_printf(pci_dev,
                                "%s: template copyout failed\n", __func__);
                        rval = ENXIO;
                        break;
                }

                if ((rval = copyout(ha->hw.mdump_buffer,
                                ((uint8_t *)fw_dump->minidump +
                                        ha->hw.mdump_template_size),
                                ha->hw.mdump_buffer_size))) {
                        device_printf(pci_dev,
                                "%s: minidump copyout failed\n", __func__);
                        rval = ENXIO;
                }
                break;

        case QLA_RD_DRVR_STATE:
                rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
                break;

        case QLA_RD_SLOWPATH_LOG:
                rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
                break;

        case QLA_RD_PCI_IDS:
                pci_ids = (qla_rd_pci_ids_t *)data;
                pci_ids->ven_id = pci_get_vendor(pci_dev);
                pci_ids->dev_id = pci_get_device(pci_dev);
                pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
                pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
                pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
                break;

        default:
                break;
        }

        return (rval);
}

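/*
 * QLA_RD_DRVR_STATE backend. Two-call protocol: a first call with
 * state->buffer == NULL just reports the required snapshot size; a
 * second call with a buffer at least that large captures the driver
 * state and copies it out, then clears the staging buffer so the next
 * call takes a fresh capture.
 */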
static int
ql_drvr_state(qla_host_t *ha, qla_driver_state_t *state)
{
        int rval = 0;
        uint32_t drvr_state_size;

        drvr_state_size = ql_drvr_state_size(ha);

        if (state->buffer == NULL) {
                state->size = drvr_state_size;
                return (0);
        }

        if (state->size < drvr_state_size)
                return (ENXIO);

        if (ha->hw.drvr_state == NULL)
                return (ENOMEM);

        ql_capture_drvr_state(ha);

        rval = copyout(ha->hw.drvr_state, state->buffer, drvr_state_size);

        bzero(ha->hw.drvr_state, drvr_state_size);

        return (rval);
}

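/*
 * Total size of the driver-state snapshot: a header, the per-ring
 * software state arrays, and raw copies of the Tx command, Rx
 * descriptor, and status descriptor rings, with each section rounded
 * up to a 64-byte boundary. Must stay in step with the layout built in
 * ql_capture_drvr_state().
 */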
static uint32_t
ql_drvr_state_size(qla_host_t *ha)
{
        uint32_t drvr_state_size;
        uint32_t size;

        size = sizeof (qla_drvr_state_hdr_t);
        drvr_state_size = QL_ALIGN(size, 64);

        size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
        drvr_state_size += QL_ALIGN(size, 64);

        size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
        drvr_state_size += QL_ALIGN(size, 64);

        size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
        drvr_state_size += QL_ALIGN(size, 64);

        size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS * ha->hw.num_tx_rings;
        drvr_state_size += QL_ALIGN(size, 64);

        size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS *
                        ha->hw.num_rds_rings;
        drvr_state_size += QL_ALIGN(size, 64);

        size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS *
                        ha->hw.num_sds_rings;
        drvr_state_size += QL_ALIGN(size, 64);

        return (drvr_state_size);
}

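/*
 * Snapshot helpers: copy the software-visible state of each Tx, Rx,
 * and status ring into the caller-supplied (already sized) arrays.
 */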
static void
ql_get_tx_state(qla_host_t *ha, qla_drvr_state_tx_t *tx_state)
{
        int i;

        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_state->base_p_addr = ha->hw.tx_cntxt[i].tx_ring_paddr;
                tx_state->cons_p_addr = ha->hw.tx_cntxt[i].tx_cons_paddr;
                tx_state->tx_prod_reg = ha->hw.tx_cntxt[i].tx_prod_reg;
                tx_state->tx_cntxt_id = ha->hw.tx_cntxt[i].tx_cntxt_id;
                tx_state->txr_free = ha->hw.tx_cntxt[i].txr_free;
                tx_state->txr_next = ha->hw.tx_cntxt[i].txr_next;
                tx_state->txr_comp = ha->hw.tx_cntxt[i].txr_comp;
                tx_state++;
        }
        return;
}

static void
ql_get_rx_state(qla_host_t *ha, qla_drvr_state_rx_t *rx_state)
{
        int i;

        for (i = 0; i < ha->hw.num_rds_rings; i++) {
                rx_state->prod_std = ha->hw.rds[i].prod_std;
                rx_state->rx_next = ha->hw.rds[i].rx_next;
                rx_state++;
        }
        return;
}

static void
ql_get_sds_state(qla_host_t *ha, qla_drvr_state_sds_t *sds_state)
{
        int i;

        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                sds_state->sdsr_next = ha->hw.sds[i].sdsr_next;
                sds_state->sds_consumer = ha->hw.sds[i].sds_consumer;
                sds_state++;
        }
        return;
}

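/*
 * Fill the pre-allocated drvr_state buffer: a header with version and
 * link info, then the per-ring state arrays, then verbatim copies of
 * the descriptor rings, each section starting on a 64-byte boundary
 * recorded as an offset in the header. A non-zero drvr_version_major
 * in the header means an earlier capture has not been drained yet, so
 * the buffer is left untouched and marked saved.
 */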
void
ql_capture_drvr_state(qla_host_t *ha)
{
        uint8_t *state_buffer;
        uint8_t *ptr;
        qla_drvr_state_hdr_t *hdr;
        uint32_t size;
        int i;

        state_buffer = ha->hw.drvr_state;

        if (state_buffer == NULL)
                return;

        hdr = (qla_drvr_state_hdr_t *)state_buffer;

        hdr->saved = 0;

        if (hdr->drvr_version_major) {
                hdr->saved = 1;
                return;
        }

        hdr->usec_ts = qla_get_usec_timestamp();

        hdr->drvr_version_major = QLA_VERSION_MAJOR;
        hdr->drvr_version_minor = QLA_VERSION_MINOR;
        hdr->drvr_version_build = QLA_VERSION_BUILD;

        bcopy(ha->hw.mac_addr, hdr->mac_addr, ETHER_ADDR_LEN);

        hdr->link_speed = ha->hw.link_speed;
        hdr->cable_length = ha->hw.cable_length;
        hdr->cable_oui = ha->hw.cable_oui;
        hdr->link_up = ha->hw.link_up;
        hdr->module_type = ha->hw.module_type;
        hdr->link_faults = ha->hw.link_faults;
        hdr->rcv_intr_coalesce = ha->hw.rcv_intr_coalesce;
        hdr->xmt_intr_coalesce = ha->hw.xmt_intr_coalesce;

        size = sizeof (qla_drvr_state_hdr_t);
        hdr->tx_state_offset = QL_ALIGN(size, 64);

        ptr = state_buffer + hdr->tx_state_offset;

        ql_get_tx_state(ha, (qla_drvr_state_tx_t *)ptr);

        size = ha->hw.num_tx_rings * (sizeof (qla_drvr_state_tx_t));
        hdr->rx_state_offset = hdr->tx_state_offset + QL_ALIGN(size, 64);
        ptr = state_buffer + hdr->rx_state_offset;

        ql_get_rx_state(ha, (qla_drvr_state_rx_t *)ptr);

        size = ha->hw.num_rds_rings * (sizeof (qla_drvr_state_rx_t));
        hdr->sds_state_offset = hdr->rx_state_offset + QL_ALIGN(size, 64);
        ptr = state_buffer + hdr->sds_state_offset;

        ql_get_sds_state(ha, (qla_drvr_state_sds_t *)ptr);

        size = ha->hw.num_sds_rings * (sizeof (qla_drvr_state_sds_t));
        hdr->txr_offset = hdr->sds_state_offset + QL_ALIGN(size, 64);
        ptr = state_buffer + hdr->txr_offset;

        hdr->num_tx_rings = ha->hw.num_tx_rings;
        hdr->txr_size = sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS;
        hdr->txr_entries = NUM_TX_DESCRIPTORS;

        size = hdr->num_tx_rings * hdr->txr_size;
        bcopy(ha->hw.dma_buf.tx_ring.dma_b, ptr, size);

        hdr->rxr_offset = hdr->txr_offset + QL_ALIGN(size, 64);
        ptr = state_buffer + hdr->rxr_offset;

        hdr->rxr_size = sizeof(q80_recv_desc_t) * NUM_RX_DESCRIPTORS;
        hdr->rxr_entries = NUM_RX_DESCRIPTORS;
        hdr->num_rx_rings = ha->hw.num_rds_rings;

        for (i = 0; i < ha->hw.num_rds_rings; i++) {
                bcopy(ha->hw.dma_buf.rds_ring[i].dma_b, ptr, hdr->rxr_size);
                ptr += hdr->rxr_size;
        }

        size = hdr->rxr_size * hdr->num_rx_rings;
        hdr->sds_offset = hdr->rxr_offset + QL_ALIGN(size, 64);
        hdr->sds_ring_size = sizeof(q80_stat_desc_t) * NUM_STATUS_DESCRIPTORS;
        hdr->sds_entries = NUM_STATUS_DESCRIPTORS;
        hdr->num_sds_rings = ha->hw.num_sds_rings;

        ptr = state_buffer + hdr->sds_offset;
        for (i = 0; i < ha->hw.num_sds_rings; i++) {
                bcopy(ha->hw.dma_buf.sds_ring[i].dma_b, ptr,
                        hdr->sds_ring_size);
                ptr += hdr->sds_ring_size;
        }
        return;
}

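/*
 * The staging buffer is allocated M_NOWAIT, so a failed allocation is
 * tolerated; ql_drvr_state() checks for NULL and returns ENOMEM.
 */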
void
ql_alloc_drvr_state_buffer(qla_host_t *ha)
{
        uint32_t drvr_state_size;

        drvr_state_size = ql_drvr_state_size(ha);

        ha->hw.drvr_state = malloc(drvr_state_size, M_QLA83XXBUF, M_NOWAIT);

        if (ha->hw.drvr_state != NULL)
                bzero(ha->hw.drvr_state, drvr_state_size);

        return;
}

void
ql_free_drvr_state_buffer(qla_host_t *ha)
{
        if (ha->hw.drvr_state != NULL)
                free(ha->hw.drvr_state, M_QLA83XXBUF);
        return;
}

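/*
 * Append one entry to the slowpath log, a fixed-size ring of
 * NUM_LOG_ENTRIES records. The index wraps via a mask, so
 * NUM_LOG_ENTRIES must be a power of two. Each entry records a
 * format-string index, a timestamp, and up to five parameters; the
 * consumer resolves the index to a human-readable message.
 */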
void
ql_sp_log(qla_host_t *ha, uint16_t fmtstr_idx, uint16_t num_params,
        uint32_t param0, uint32_t param1, uint32_t param2, uint32_t param3,
        uint32_t param4)
{
        qla_sp_log_entry_t *sp_e, *sp_log;

        if (((sp_log = ha->hw.sp_log) == NULL) || ha->hw.sp_log_stop)
                return;

        mtx_lock(&ha->sp_log_lock);

        sp_e = &sp_log[ha->hw.sp_log_index];

        bzero(sp_e, sizeof (qla_sp_log_entry_t));

        sp_e->fmtstr_idx = fmtstr_idx;
        sp_e->num_params = num_params;

        sp_e->usec_ts = qla_get_usec_timestamp();

        sp_e->params[0] = param0;
        sp_e->params[1] = param1;
        sp_e->params[2] = param2;
        sp_e->params[3] = param3;
        sp_e->params[4] = param4;

        ha->hw.sp_log_index = (ha->hw.sp_log_index + 1) & (NUM_LOG_ENTRIES - 1);

        if (ha->hw.sp_log_num_entries < NUM_LOG_ENTRIES)
                ha->hw.sp_log_num_entries++;

        mtx_unlock(&ha->sp_log_lock);

        return;
}

void
ql_alloc_sp_log_buffer(qla_host_t *ha)
{
        uint32_t size;

        size = (sizeof(qla_sp_log_entry_t)) * NUM_LOG_ENTRIES;

        ha->hw.sp_log = malloc(size, M_QLA83XXBUF, M_NOWAIT);

        if (ha->hw.sp_log != NULL)
                bzero(ha->hw.sp_log, size);

        ha->hw.sp_log_index = 0;
        ha->hw.sp_log_num_entries = 0;

        return;
}

void
ql_free_sp_log_buffer(qla_host_t *ha)
{
        if (ha->hw.sp_log != NULL)
                free(ha->hw.sp_log, M_QLA83XXBUF);
        return;
}

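/*
 * QLA_RD_SLOWPATH_LOG backend: copy the entire ring to userland under
 * sp_log_lock and report next_idx (the oldest slot once the ring has
 * wrapped) and the number of valid entries, so the consumer can replay
 * the records in order.
 *
 * XXX: copyout() can fault on the user buffer and sleep while
 * sp_log_lock (a mutex) is held.
 */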
static int
ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log)
{
        int rval = 0;
        uint32_t size;

        if ((ha->hw.sp_log == NULL) || (log->buffer == NULL))
                return (EINVAL);

        size = (sizeof(qla_sp_log_entry_t) * NUM_LOG_ENTRIES);

        mtx_lock(&ha->sp_log_lock);

        rval = copyout(ha->hw.sp_log, log->buffer, size);

        if (!rval) {
                log->next_idx = ha->hw.sp_log_index;
                log->num_entries = ha->hw.sp_log_num_entries;
        }
        device_printf(ha->pci_dev,
                "%s: exit [rval = %d][%p, next_idx = %d, %d entries, %d bytes]\n",
                __func__, rval, log->buffer, log->next_idx, log->num_entries,
                size);
        mtx_unlock(&ha->sp_log_lock);

        return (rval);
}