 * Copyright (c) 1997-2006 by Matthew Jacob
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#if __FreeBSD_version >= 700000
#include <sys/linker.h>
#include <sys/firmware.h>
#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/malloc.h>
#include <dev/isp/isp_freebsd.h>
#if __FreeBSD_version < 500000
#define BUS_PROBE_DEFAULT 0
static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t);
isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t);
static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);
static struct ispmdvec mdvec = {
    BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
static struct ispmdvec mdvec_1080 = {
    BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
static struct ispmdvec mdvec_12160 = {
    BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
static struct ispmdvec mdvec_2100 = {
static struct ispmdvec mdvec_2200 = {
static struct ispmdvec mdvec_2300 = {
static struct ispmdvec mdvec_2400 = {
#ifndef PCIM_CMD_INVEN
#define PCIM_CMD_INVEN 0x10
#ifndef PCIM_CMD_BUSMASTEREN
#define PCIM_CMD_BUSMASTEREN 0x0004
#ifndef PCIM_CMD_PERRESPEN
#define PCIM_CMD_PERRESPEN 0x0040
#ifndef PCIM_CMD_SEREN
#define PCIM_CMD_SEREN 0x0100
#ifndef PCIM_CMD_INTX_DISABLE
#define PCIM_CMD_INTX_DISABLE 0x0400
#define PCIR_COMMAND 0x04
#ifndef PCIR_CACHELNSZ
#define PCIR_CACHELNSZ 0x0c
#ifndef PCIR_LATTIMER
#define PCIR_LATTIMER 0x0d
#define PCIR_ROMADDR 0x30
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
#ifndef PCI_PRODUCT_QLOGIC_ISP2322
#define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
#ifndef PCI_PRODUCT_QLOGIC_ISP2422
#define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
#ifndef PCI_PRODUCT_QLOGIC_ISP6322
#define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
#define PCI_QLOGIC_ISP1020 \
    ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP1080 \
    ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP10160 \
    ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP12160 \
    ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP1240 \
    ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP1280 \
    ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP2100 \
    ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP2200 \
    ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP2300 \
    ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP2312 \
    ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP2322 \
    ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP2422 \
    ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP6312 \
    ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP6322 \
    ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
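/*
 * Note: these composites are laid out as (product id << 16) | vendor id,
 * the same layout pci_get_devid(9) returns, so they can be compared
 * directly against it in the probe and attach routines below.
 */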
 * Odd case for some AMI raid cards... We need to *not* attach to this.
#define AMI_RAID_SUBVENDOR_ID 0x101e
#define IO_MAP_REG 0x10
#define MEM_MAP_REG 0x14
#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10
static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);
struct isp_pcisoftc {
    struct resource * pci_reg;
    bus_space_tag_t pci_st;
    bus_space_handle_t pci_sh;
    int16_t pci_poff[_NREG_BLKS];
static device_method_t isp_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, isp_pci_probe),
    DEVMETHOD(device_attach, isp_pci_attach),
    DEVMETHOD(device_detach, isp_pci_detach),
static void isp_pci_intr(void *);
static driver_t isp_pci_driver = {
    "isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
#if __FreeBSD_version < 700000
extern ispfwfunc *isp_get_firmware_p;
isp_pci_probe(device_t dev)
    switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
    case PCI_QLOGIC_ISP1020:
        device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
    case PCI_QLOGIC_ISP1080:
        device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
    case PCI_QLOGIC_ISP1240:
        device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
    case PCI_QLOGIC_ISP1280:
        device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
    case PCI_QLOGIC_ISP10160:
        device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
    case PCI_QLOGIC_ISP12160:
        if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
        device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
    case PCI_QLOGIC_ISP2100:
        device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
    case PCI_QLOGIC_ISP2200:
        device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
    case PCI_QLOGIC_ISP2300:
        device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
    case PCI_QLOGIC_ISP2312:
        device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
    case PCI_QLOGIC_ISP2322:
        device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
    case PCI_QLOGIC_ISP2422:
        device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
    case PCI_QLOGIC_ISP6312:
        device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
    case PCI_QLOGIC_ISP6322:
        device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
    if (isp_announced == 0 && bootverbose) {
        printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
            "Core Version %d.%d\n",
            ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
            ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
     * XXXX: Here is where we might load the f/w module
     * XXXX: (or increase a reference count to it).
    return (BUS_PROBE_DEFAULT);
#if __FreeBSD_version < 500000
isp_get_options(device_t dev, ispsoftc_t *isp)
    callout_handle_init(&isp->isp_osinfo.ldt);
    callout_handle_init(&isp->isp_osinfo.gdt);
    unit = device_get_unit(dev);
    if (getenv_int("isp_disable", &bitmap)) {
        if (bitmap & (1 << unit)) {
            isp->isp_osinfo.disabled = 1;
    if (getenv_int("isp_no_fwload", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts |= ISP_CFG_NORELOAD;
    if (getenv_int("isp_fwload", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts &= ~ISP_CFG_NORELOAD;
    if (getenv_int("isp_no_nvram", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts |= ISP_CFG_NONVRAM;
    if (getenv_int("isp_nvram", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts &= ~ISP_CFG_NONVRAM;
    if (getenv_int("isp_fcduplex", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
    if (getenv_int("isp_no_fcduplex", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
    if (getenv_int("isp_nport", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts |= ISP_CFG_NPORT;
     * Because the resource_*_value functions can neither return
     * 64 bit integer values, nor can they be directly coerced
     * to interpret the right hand side of the assignment as
     * you want them to interpret it, we have to force WWN
     * hint replacement to specify WWN strings with a leading
     * 'w' (e.g. w50000000aaaa0001). Sigh.
    if (getenv_quad("isp_portwwn", &wwn)) {
        isp->isp_osinfo.default_port_wwn = wwn;
        isp->isp_confopts |= ISP_CFG_OWNWWPN;
    if (isp->isp_osinfo.default_port_wwn == 0) {
        isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
    if (getenv_quad("isp_nodewwn", &wwn)) {
        isp->isp_osinfo.default_node_wwn = wwn;
        isp->isp_confopts |= ISP_CFG_OWNWWNN;
    if (isp->isp_osinfo.default_node_wwn == 0) {
        isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
    (void) getenv_int("isp_debug", &bitmap);
        isp->isp_dblev = bitmap;
        isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
        isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
    (void) getenv_int("isp_fabric_hysteresis", &bitmap);
    if (bitmap >= 0 && bitmap < 256) {
        isp->isp_osinfo.hysteresis = bitmap;
        isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
    (void) getenv_int("isp_loop_down_limit", &bitmap);
    if (bitmap >= 0 && bitmap < 0xffff) {
        isp->isp_osinfo.loop_down_limit = bitmap;
        isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
    (void) getenv_int("isp_gone_device_time", &bitmap);
    if (bitmap >= 0 && bitmap < 0xffff) {
        isp->isp_osinfo.gone_device_time = bitmap;
        isp->isp_osinfo.gone_device_time = isp_gone_device_time;
#ifdef ISP_FW_CRASH_DUMP
    if (getenv_int("isp_fw_dump_enable", &bitmap)) {
        if (bitmap & (1 << unit)) {
                amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
            } else if (IS_23XX(isp)) {
                amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
                FCPARAM(isp)->isp_dump_data =
                    malloc(amt, M_DEVBUF, M_WAITOK);
                memset(FCPARAM(isp)->isp_dump_data, 0, amt);
                    "f/w crash dumps not supported for card\n");
    if (getenv_int("role", &bitmap)) {
        isp->isp_role = bitmap;
        isp->isp_role = ISP_DEFAULT_ROLES;
isp_get_pci_options(device_t dev, int *m1, int *m2)
    int unit = device_get_unit(dev);
    *m1 = PCIM_CMD_MEMEN;
    *m2 = PCIM_CMD_PORTEN;
    if (getenv_int("isp_mem_map", &bitmap)) {
        if (bitmap & (1 << unit)) {
            *m1 = PCIM_CMD_MEMEN;
            *m2 = PCIM_CMD_PORTEN;
    if (getenv_int("isp_io_map", &bitmap)) {
        if (bitmap & (1 << unit)) {
            *m1 = PCIM_CMD_PORTEN;
            *m2 = PCIM_CMD_MEMEN;
isp_get_options(device_t dev, ispsoftc_t *isp)
    callout_handle_init(&isp->isp_osinfo.ldt);
    callout_handle_init(&isp->isp_osinfo.gdt);
     * Figure out if we're supposed to skip this one.
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "disable", &tval) == 0 && tval) {
        device_printf(dev, "disabled at user request\n");
        isp->isp_osinfo.disabled = 1;
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "role", &tval) == 0 && tval != -1) {
        tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
        isp->isp_role = tval;
        device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
#ifdef ISP_TARGET_MODE
        isp->isp_role = ISP_ROLE_TARGET;
        isp->isp_role = ISP_DEFAULT_ROLES;
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "fwload_disable", &tval) == 0 && tval != 0) {
        isp->isp_confopts |= ISP_CFG_NORELOAD;
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "ignore_nvram", &tval) == 0 && tval != 0) {
        isp->isp_confopts |= ISP_CFG_NONVRAM;
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "fullduplex", &tval) == 0 && tval != 0) {
        isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
#ifdef ISP_FW_CRASH_DUMP
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "fw_dump_enable", &tval) == 0 && tval != 0) {
            amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
        } else if (IS_23XX(isp)) {
            amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
            FCPARAM(isp)->isp_dump_data =
                malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
            "f/w crash dumps not supported for this model\n");
    if (resource_string_value(device_get_name(dev), device_get_unit(dev),
        "topology", (const char **) &sptr) == 0 && sptr != 0) {
        if (strcmp(sptr, "lport") == 0) {
            isp->isp_confopts |= ISP_CFG_LPORT;
        } else if (strcmp(sptr, "nport") == 0) {
            isp->isp_confopts |= ISP_CFG_NPORT;
        } else if (strcmp(sptr, "lport-only") == 0) {
            isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
        } else if (strcmp(sptr, "nport-only") == 0) {
            isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
     * Because the resource_*_value functions can neither return
     * 64 bit integer values, nor can they be directly coerced
     * to interpret the right hand side of the assignment as
     * you want them to interpret it, we have to force WWN
     * hint replacement to specify WWN strings with a leading
     * 'w' (e.g. w50000000aaaa0001). Sigh.
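     * For example (a sketch; see isp(4) for the exact hint names), a
     * device.hints entry might look like:
     *   hint.isp.0.portwwn="w50000000aaaa0001"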
    tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
        "portwwn", (const char **) &sptr);
    if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
        isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
        if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
            device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
            isp->isp_osinfo.default_port_wwn = 0;
            isp->isp_confopts |= ISP_CFG_OWNWWPN;
    if (isp->isp_osinfo.default_port_wwn == 0) {
        isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
    tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
        "nodewwn", (const char **) &sptr);
    if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
        isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
        if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
            device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
            isp->isp_osinfo.default_node_wwn = 0;
            isp->isp_confopts |= ISP_CFG_OWNWWNN;
    if (isp->isp_osinfo.default_node_wwn == 0) {
        isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
    isp->isp_osinfo.default_id = -1;
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "iid", &tval) == 0) {
        isp->isp_osinfo.default_id = tval;
        isp->isp_confopts |= ISP_CFG_OWNLOOPID;
    if (isp->isp_osinfo.default_id == -1) {
            isp->isp_osinfo.default_id = 109;
            isp->isp_osinfo.default_id = 7;
     * Set up logging levels.
    (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
        isp->isp_dblev = tval;
        isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
        isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
    (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
        "hysteresis", &tval);
    if (tval >= 0 && tval < 256) {
        isp->isp_osinfo.hysteresis = tval;
        isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
    (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
        "loop_down_limit", &tval);
    if (tval >= 0 && tval < 0xffff) {
        isp->isp_osinfo.loop_down_limit = tval;
        isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
    (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
        "gone_device_time", &tval);
    if (tval >= 0 && tval < 0xffff) {
        isp->isp_osinfo.gone_device_time = tval;
        isp->isp_osinfo.gone_device_time = isp_gone_device_time;
isp_get_pci_options(device_t dev, int *m1, int *m2)
     * Which we should try first - memory mapping or i/o mapping?
     * We used to try memory first followed by i/o on alpha, otherwise
     * the reverse, but we should just try memory first all the time now.
    *m1 = PCIM_CMD_MEMEN;
    *m2 = PCIM_CMD_PORTEN;
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "prefer_iomap", &tval) == 0 && tval != 0) {
        *m1 = PCIM_CMD_PORTEN;
        *m2 = PCIM_CMD_MEMEN;
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "prefer_memmap", &tval) == 0 && tval != 0) {
        *m1 = PCIM_CMD_MEMEN;
        *m2 = PCIM_CMD_PORTEN;
isp_pci_attach(device_t dev)
    struct resource *regs, *irq;
    int rtp, rgd, iqd, m1, m2;
    uint32_t data, cmd, linesz, psize, basetype;
    struct isp_pcisoftc *pcs;
    ispsoftc_t *isp = NULL;
    struct ispmdvec *mdvp;
#if __FreeBSD_version >= 500000
    pcs = device_get_softc(dev);
        device_printf(dev, "cannot get softc\n");
    memset(pcs, 0, sizeof (*pcs));
     * Set and Get Generic Options
    isp_get_options(dev, isp);
     * Check to see if options have us disabled
    if (isp->isp_osinfo.disabled) {
         * But return zero to preserve unit numbering
     * Get PCI options- which in this case are just mapping preferences.
    isp_get_pci_options(dev, &m1, &m2);
    linesz = PCI_DFLT_LNSZ;
    cmd = pci_read_config(dev, PCIR_COMMAND, 2);
    rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
    rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
    regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
    if (regs == NULL && (cmd & m2)) {
        rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
        rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
        regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
        device_printf(dev, "unable to map any ports\n");
    device_printf(dev, "using %s space register mapping\n",
        (rgd == IO_MAP_REG)? "I/O" : "Memory");
    pcs->pci_st = rman_get_bustag(regs);
    pcs->pci_sh = rman_get_bushandle(regs);
    pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
    pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
    pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
    pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
    pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
    basetype = ISP_HA_SCSI_UNKNOWN;
    psize = sizeof (sdparam);
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
        basetype = ISP_HA_SCSI_UNKNOWN;
        psize = sizeof (sdparam);
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
        basetype = ISP_HA_SCSI_1080;
        psize = sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
        basetype = ISP_HA_SCSI_1240;
        psize = 2 * sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
        basetype = ISP_HA_SCSI_1280;
        psize = 2 * sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
        basetype = ISP_HA_SCSI_10160;
        psize = sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
        basetype = ISP_HA_SCSI_12160;
        psize = 2 * sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
        basetype = ISP_HA_FC_2100;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2100_OFF;
        if (pci_get_revid(dev) < 3) {
             * XXX: Need to get the actual revision
             * XXX: number of the 2100 FB. At any rate,
             * XXX: lower cache line size for early revision
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
        basetype = ISP_HA_FC_2200;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2100_OFF;
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
        basetype = ISP_HA_FC_2300;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2300_OFF;
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
        pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
        basetype = ISP_HA_FC_2312;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2300_OFF;
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
        pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
        basetype = ISP_HA_FC_2322;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2300_OFF;
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
        basetype = ISP_HA_FC_2400;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2400_OFF;
    isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (isp->isp_param == NULL) {
        device_printf(dev, "cannot allocate parameter data\n");
    isp->isp_mdvec = mdvp;
    isp->isp_type = basetype;
    isp->isp_revision = pci_get_revid(dev);
#if __FreeBSD_version >= 700000
     * Try and find firmware for this device.
        unsigned int did = pci_get_device(dev);
         * Map a few pci ids to fw names
        case PCI_PRODUCT_QLOGIC_ISP1020:
        case PCI_PRODUCT_QLOGIC_ISP1240:
        case PCI_PRODUCT_QLOGIC_ISP10160:
        case PCI_PRODUCT_QLOGIC_ISP12160:
        case PCI_PRODUCT_QLOGIC_ISP6312:
        case PCI_PRODUCT_QLOGIC_ISP2312:
        case PCI_PRODUCT_QLOGIC_ISP6322:
        case PCI_PRODUCT_QLOGIC_ISP2422:
        isp->isp_osinfo.fw = NULL;
        if (isp->isp_role & ISP_ROLE_TARGET) {
            snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
            isp->isp_osinfo.fw = firmware_get(fwname);
        if (isp->isp_osinfo.fw == NULL) {
            snprintf(fwname, sizeof (fwname), "isp_%04x", did);
            isp->isp_osinfo.fw = firmware_get(fwname);
        if (isp->isp_osinfo.fw != NULL) {
            u.fred = isp->isp_osinfo.fw->data;
            isp->isp_mdvec->dv_ispfw = u.bob;
    if (isp_get_firmware_p) {
        int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
        (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
        (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
     * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
    cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
        PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
    if (IS_2300(isp)) { /* per QLogic errata */
        cmd &= ~PCIM_CMD_INVEN;
    if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
        cmd &= ~PCIM_CMD_INTX_DISABLE;
        cmd &= ~PCIM_CMD_INTX_DISABLE;
     * Is this a PCI-X card? If so, set max read byte count.
    if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
        pxcmd = pci_read_config(dev, reg, 2);
        pci_write_config(dev, reg, 2, pxcmd);
     * Is this a PCI Express card? If so, set max read byte count.
    if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
        pectl = pci_read_config(dev, reg, 2);
        pci_write_config(dev, reg, 2, pectl);
    pci_write_config(dev, PCIR_COMMAND, cmd, 2);
     * Make sure the Cache Line Size register is set sensibly.
    data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
    if (data != linesz) {
        data = PCI_DFLT_LNSZ;
        isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
        pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
     * Make sure the Latency Timer is sane.
    data = pci_read_config(dev, PCIR_LATTIMER, 1);
    if (data < PCI_DFLT_LTNCY) {
        data = PCI_DFLT_LTNCY;
        isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
        pci_write_config(dev, PCIR_LATTIMER, data, 1);
     * Make sure we've disabled the ROM.
    data = pci_read_config(dev, PCIR_ROMADDR, 4);
    pci_write_config(dev, PCIR_ROMADDR, data, 4);
    irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
        RF_ACTIVE | RF_SHAREABLE);
        device_printf(dev, "could not allocate interrupt\n");
#if __FreeBSD_version >= 500000
    /* Make sure the lock is set up. */
    mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
    if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
        device_printf(dev, "could not setup interrupt\n");
     * Last minute checks...
    if (IS_23XX(isp) || IS_24XX(isp)) {
        isp->isp_port = pci_get_function(dev);
     * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
    isp->isp_touched = 1;
     * Make sure we're in reset state.
    if (isp->isp_state != ISP_RESETSTATE) {
    if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
    if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
     * XXXX: Here is where we might unload the f/w module
     * XXXX: (or decrease the reference count to it).
    if (pcs && pcs->ih) {
        (void) bus_teardown_intr(dev, irq, pcs->ih);
#if __FreeBSD_version >= 500000
    if (locksetup && isp) {
        mtx_destroy(&isp->isp_osinfo.lock);
        (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
        (void) bus_release_resource(dev, rtp, rgd, regs);
    if (pcs->pci_isp.isp_param) {
#ifdef ISP_FW_CRASH_DUMP
        if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
            free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
        free(pcs->pci_isp.isp_param, M_DEVBUF);
     * XXXX: Here is where we might unload the f/w module
     * XXXX: (or decrease the reference count to it).
isp_pci_detach(device_t dev)
    struct isp_pcisoftc *pcs;
    pcs = device_get_softc(dev);
    isp = (ispsoftc_t *) pcs;
    ISP_DISABLE_INTS(isp);
isp_pci_intr(void *arg)
    ispsoftc_t *isp = arg;
    uint16_t sema, mbox;
    if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
        isp->isp_intbogus++;
        isp_intr(isp, isr, sema, mbox);
#define IspVirt2Off(a, x) \
    (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
    _BLK_REG_SHFT] + ((x) & 0xfff))
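/*
 * IspVirt2Off maps a "virtual" register offset to a PCI bus-space offset:
 * the block bits of the offset index the per-chip pci_poff[] table set up
 * in isp_pci_attach to find that block's base in PCI space, and the low
 * 12 bits are the offset within the block.
 */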
#define BXR2(pcs, off) \
    bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define BXW2(pcs, off, v) \
    bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
#define BXR4(pcs, off) \
    bus_space_read_4(pcs->pci_st, pcs->pci_sh, off)
#define BXW4(pcs, off, v) \
    bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v)
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    uint32_t val0, val1;
        val0 = BXR2(pcs, IspVirt2Off(isp, off));
        val1 = BXR2(pcs, IspVirt2Off(isp, off));
    } while (val0 != val1 && ++i < 1000);
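/*
 * The register is read repeatedly until two consecutive reads agree
 * (giving up after 1000 tries), so a value caught mid-update is never
 * returned to the caller.
 */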
isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbp)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
        if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
        isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
        sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
    isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
    isr &= INT_PENDING_MASK(isp);
    sema &= BIU_SEMA_LOCK;
    if (isr == 0 && sema == 0) {
    if ((*semap = sema) != 0) {
            if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
            *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
    r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU_R2HSTSLO));
    isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
    if ((r2hisr & BIU_R2HST_INTR) == 0) {
    switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
    case ISPR2HST_ROM_MBX_OK:
    case ISPR2HST_ROM_MBX_FAIL:
    case ISPR2HST_MBX_OK:
    case ISPR2HST_MBX_FAIL:
    case ISPR2HST_ASYNC_EVENT:
        *isrp = r2hisr & 0xffff;
        *mbox0p = (r2hisr >> 16);
    case ISPR2HST_RIO_16:
        *isrp = r2hisr & 0xffff;
        *mbox0p = ASYNC_RIO1;
    case ISPR2HST_FPOST:
        *isrp = r2hisr & 0xffff;
        *mbox0p = ASYNC_CMD_CMPLT;
    case ISPR2HST_FPOST_CTIO:
        *isrp = r2hisr & 0xffff;
        *mbox0p = ASYNC_CTIO_DONE;
    case ISPR2HST_RSPQ_UPDATE:
        *isrp = r2hisr & 0xffff;
        hccr = ISP_READ(isp, HCCR);
        if (hccr & HCCR_PAUSE) {
            ISP_WRITE(isp, HCCR, HCCR_RESET);
            isp_prt(isp, ISP_LOGERR,
1374 "RISC paused at interrupt (%x->%x\n", hccr,
                ISP_READ(isp, HCCR));
            isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n",
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO));
    isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
    if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
    switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
    case ISP2400R2HST_ROM_MBX_OK:
    case ISP2400R2HST_ROM_MBX_FAIL:
    case ISP2400R2HST_MBX_OK:
    case ISP2400R2HST_MBX_FAIL:
    case ISP2400R2HST_ASYNC_EVENT:
        *isrp = r2hisr & 0xffff;
        *mbox0p = (r2hisr >> 16);
    case ISP2400R2HST_RSPQ_UPDATE:
    case ISP2400R2HST_ATIO_RSPQ_UPDATE:
    case ISP2400R2HST_ATIO_RQST_UPDATE:
        *isrp = r2hisr & 0xffff;
        ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
        isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
         * We will assume that someone has paused the RISC processor.
        oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
            oldconf | BIU_PCI_CONF1_SXP);
    rv = BXR2(pcs, IspVirt2Off(isp, regoff));
    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
         * We will assume that someone has paused the RISC processor.
        oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
            oldconf | BIU_PCI_CONF1_SXP);
        junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
    BXW2(pcs, IspVirt2Off(isp, regoff), val);
    junk = BXR2(pcs, IspVirt2Off(isp, regoff));
    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
        junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
    uint32_t rv, oc = 0;
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
        (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
         * We will assume that someone has paused the RISC processor.
        oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        tc = oc & ~BIU_PCI1080_CONF1_DMA;
        if (regoff & SXP_BANK1_SELECT)
            tc |= BIU_PCI1080_CONF1_SXP1;
            tc |= BIU_PCI1080_CONF1_SXP0;
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
    } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
        oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
            oc | BIU_PCI1080_CONF1_DMA);
    rv = BXR2(pcs, IspVirt2Off(isp, regoff));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
        (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
         * We will assume that someone has paused the RISC processor.
        oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        tc = oc & ~BIU_PCI1080_CONF1_DMA;
        if (regoff & SXP_BANK1_SELECT)
            tc |= BIU_PCI1080_CONF1_SXP1;
            tc |= BIU_PCI1080_CONF1_SXP0;
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
        junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
    } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
        oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
            oc | BIU_PCI1080_CONF1_DMA);
        junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
    BXW2(pcs, IspVirt2Off(isp, regoff), val);
    junk = BXR2(pcs, IspVirt2Off(isp, regoff));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
        junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    int block = regoff & _BLK_REG_MASK;
        return (BXR2(pcs, IspVirt2Off(pcs, regoff)));
        isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
        return (0xffffffff);
        isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
        return (0xffffffff);
        isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
        return (0xffffffff);
        isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
        return (0xffffffff);
    case BIU2400_FLASH_ADDR:
    case BIU2400_FLASH_DATA:
    case BIU2400_REQINP:
    case BIU2400_REQOUTP:
    case BIU2400_RSPINP:
    case BIU2400_RSPOUTP:
    case BIU2400_PRI_RQINP:
    case BIU2400_PRI_RSPINP:
    case BIU2400_ATIO_RSPINP:
    case BIU2400_ATIO_REQINP:
        rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
    case BIU2400_R2HSTSLO:
        rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
    case BIU2400_R2HSTSHI:
        rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16;
        isp_prt(isp, ISP_LOGERR,
            "isp_pci_rd_reg_2400: unknown offset %x", regoff);
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    int block = regoff & _BLK_REG_MASK;
        BXW2(pcs, IspVirt2Off(pcs, regoff), val);
        junk = BXR2(pcs, IspVirt2Off(pcs, regoff));
        isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
        isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
        isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
        isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
    case BIU2400_FLASH_ADDR:
    case BIU2400_FLASH_DATA:
    case BIU2400_REQINP:
    case BIU2400_REQOUTP:
    case BIU2400_RSPINP:
    case BIU2400_RSPOUTP:
    case BIU2400_PRI_RQINP:
    case BIU2400_PRI_RSPINP:
    case BIU2400_ATIO_RSPINP:
    case BIU2400_ATIO_REQINP:
        BXW4(pcs, IspVirt2Off(pcs, regoff), val);
        junk = BXR4(pcs, IspVirt2Off(pcs, regoff));
        isp_prt(isp, ISP_LOGERR,
            "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
static void imc(void *, bus_dma_segment_t *, int, int);
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    struct imush *imushp = (struct imush *) arg;
    imushp->error = error;
        ispsoftc_t *isp = imushp->isp;
        bus_addr_t addr = segs->ds_addr;
        isp->isp_rquest_dma = addr;
        addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        isp->isp_result_dma = addr;
            addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
            FCPARAM(isp)->isp_scdma = addr;
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
#if __FreeBSD_version < 500000
#define BUS_DMA_ROOTARG NULL
#define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \
    bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)
#elif __FreeBSD_version < 700020
#define BUS_DMA_ROOTARG NULL
#define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \
    bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
    busdma_lock_mutex, &Giant, z)
#define BUS_DMA_ROOTARG bus_get_dma_tag(pcs->pci_dev)
#define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \
    bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
    busdma_lock_mutex, &Giant, z)
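/*
 * These wrappers absorb the bus_dma_tag_create() API differences across
 * FreeBSD versions: the oldest kernels take no lock function, later ones
 * take a lockfunc/lockarg pair (Giant here), and 7.x additionally takes a
 * parent tag obtained from bus_get_dma_tag().
 */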
isp_pci_mbxdma(ispsoftc_t *isp)
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
    bus_size_t slim;    /* segment size */
    bus_addr_t llim;    /* low limit of unavailable dma */
    bus_addr_t hlim;    /* high limit of unavailable dma */
     * Already been here? If so, leave...
    if (isp->isp_rquest) {
    if (isp->isp_maxcmds == 0) {
        isp_prt(isp, ISP_LOGERR, "maxcmds not set");
    hlim = BUS_SPACE_MAXADDR;
    if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
        slim = (bus_size_t) (1ULL << 32);
        llim = BUS_SPACE_MAXADDR;
        llim = BUS_SPACE_MAXADDR_32BIT;
     * XXX: We don't really support 64 bit target mode for parallel scsi yet
#ifdef ISP_TARGET_MODE
    if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
        isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
    if (isp_dma_tag_create(BUS_DMA_ROOTARG, 1, slim, llim,
        hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
        isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
    len = sizeof (XS_T **) * isp->isp_maxcmds;
    isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
    if (isp->isp_xflist == NULL) {
        isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
#ifdef ISP_TARGET_MODE
    len = sizeof (void **) * isp->isp_maxcmds;
    isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
    if (isp->isp_tgtlist == NULL) {
        isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
    len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
    pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
    if (pcs->dmaps == NULL) {
        isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
        free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
        free(isp->isp_tgtlist, M_DEVBUF);
     * Allocate and map the request, result queues, plus FC scratch area.
    len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
    len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
        len += ISP2100_SCRLEN;
    ns = (len / PAGE_SIZE) + 1;
     * Create a tag for the control spaces- force it to within 32 bits.
    if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
        NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
        isp_prt(isp, ISP_LOGERR,
            "cannot create a dma tag for control spaces");
        free(pcs->dmaps, M_DEVBUF);
        free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
        free(isp->isp_tgtlist, M_DEVBUF);
    if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
        &isp->isp_cdmap) != 0) {
        isp_prt(isp, ISP_LOGERR,
            "cannot allocate %d bytes of CCB memory", len);
        bus_dma_tag_destroy(isp->isp_cdmat);
        free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
        free(isp->isp_tgtlist, M_DEVBUF);
        free(pcs->dmaps, M_DEVBUF);
    for (i = 0; i < isp->isp_maxcmds; i++) {
        error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
            isp_prt(isp, ISP_LOGERR,
                "error %d creating per-cmd DMA maps", error);
                bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
    bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
        isp_prt(isp, ISP_LOGERR,
            "error %d loading dma map for control areas", im.error);
    isp->isp_rquest = base;
    base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
    isp->isp_result = base;
        base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
        FCPARAM(isp)->isp_scratch = base;
    bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
    bus_dma_tag_destroy(isp->isp_cdmat);
    free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
    free(isp->isp_tgtlist, M_DEVBUF);
    free(pcs->dmaps, M_DEVBUF);
    isp->isp_rquest = NULL;
#define MUSHERR_NOQENTRIES -2
#ifdef ISP_TARGET_MODE
 * We need to handle DMA for target mode differently from initiator mode.
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);
#define STATUS_WITH_DATA 1
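/*
 * With STATUS_WITH_DATA defined, SCSI status may ride along with the
 * final data CTIO; otherwise an extra status-only CTIO is synthesized
 * (see the #ifndef STATUS_WITH_DATA case below).
 */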
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
    struct ccb_scsiio *csio;
    struct isp_pcisoftc *pcs;
    ct_entry_t *cto, *qe;
    uint8_t scsi_status;
    uint32_t curi, nxti, handle;
    int nth_ctio, nctios, send_status;
    mp = (mush_t *) arg;
    csio = mp->cmd_token;
    curi = isp->isp_reqidx;
    qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
    cto->ct_seg_count = 0;
    cto->ct_header.rqs_entry_count = 1;
    MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
        cto->ct_header.rqs_seqno = 1;
        isp_prt(isp, ISP_LOGTDEBUG1,
            "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
            cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
            cto->ct_tag_val, cto->ct_flags, cto->ct_status,
            cto->ct_scsi_status, cto->ct_resid);
        ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
        isp_put_ctio(isp, cto, qe);
    nctios = nseg / ISP_RQDSEG;
    if (nseg % ISP_RQDSEG) {
     * Save syshandle, and potentially any SCSI status, which we'll
     * reinsert on the last CTIO we're going to send.
    handle = cto->ct_syshandle;
    cto->ct_syshandle = 0;
    cto->ct_header.rqs_seqno = 0;
    send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
        sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
        cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
         * Preserve residual.
        resid = cto->ct_resid;
         * Save actual SCSI status.
        scsi_status = cto->ct_scsi_status;
#ifndef STATUS_WITH_DATA
        sflags |= CT_NO_DATA;
         * We can't do a status at the same time as a data CTIO, so
         * we need to synthesize an extra CTIO at this level.
        sflags = scsi_status = resid = 0;
    cto->ct_scsi_status = 0;
    pcs = (struct isp_pcisoftc *)isp;
    dp = &pcs->dmaps[isp_handle_index(handle)];
    if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
    for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
        if (seglim > ISP_RQDSEG)
            seglim = ISP_RQDSEG;
        for (seg = 0; seg < seglim; seg++, nseg--) {
             * Unlike normal initiator commands, we don't
             * do any swizzling here.
            cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
            cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
            cto->ct_xfrlen += dm_segs->ds_len;
        cto->ct_seg_count = seg;
         * This case should only happen when we're sending an
         * extra CTIO with final status.
            if (send_status == 0) {
                isp_prt(isp, ISP_LOGWARN,
                    "tdma_mk ran out of segments");
         * At this point, the fields ct_lun, ct_iid, ct_tagval,
         * ct_tagtype, and ct_timeout have been carried over
         * unchanged from what our caller had set.
         * The dataseg fields and the seg_count fields we just got
         * through setting. The data direction we've preserved all
         * along and only clear it if we're now sending status.
        if (nth_ctio == nctios - 1) {
             * We're the last in a sequence of CTIOs, so mark
             * this CTIO and save the handle to the CCB such that
             * when this CTIO completes we can free dma resources
             * and do whatever else we need to do to finish the
             * rest of the command. We *don't* give this to the
             * firmware to work on- the caller will do that.
            cto->ct_syshandle = handle;
            cto->ct_header.rqs_seqno = 1;
                cto->ct_scsi_status = scsi_status;
                cto->ct_flags |= sflags;
                cto->ct_resid = resid;
                isp_prt(isp, ISP_LOGTDEBUG1,
                    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
                    "scsi status %x resid %d",
                    cto->ct_fwhandle, csio->ccb_h.target_lun,
                    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
                    cto->ct_scsi_status, cto->ct_resid);
                isp_prt(isp, ISP_LOGTDEBUG1,
                    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
                    cto->ct_fwhandle, csio->ccb_h.target_lun,
                    cto->ct_iid, cto->ct_tag_val,
            isp_put_ctio(isp, cto, qe);
            ISP_TDQE(isp, "last tdma_mk", curi, cto);
                MEMORYBARRIER(isp, SYNC_REQUEST,
            ct_entry_t *oqe = qe;
             * Make sure syshandle fields are clean
            cto->ct_syshandle = 0;
            cto->ct_header.rqs_seqno = 0;
            isp_prt(isp, ISP_LOGTDEBUG1,
                "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
                cto->ct_fwhandle, csio->ccb_h.target_lun,
                cto->ct_iid, cto->ct_flags);
                ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
            nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
            if (nxti == mp->optr) {
                isp_prt(isp, ISP_LOGTDEBUG0,
                    "Queue Overflow in tdma_mk");
                mp->error = MUSHERR_NOQENTRIES;
             * Now that we're done with the old CTIO,
             * flush it out to the request queue.
            ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
            isp_put_ctio(isp, cto, oqe);
            if (nth_ctio != 0) {
                MEMORYBARRIER(isp, SYNC_REQUEST, curi,
            curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));
             * Reset some fields in the CTIO so we can reuse
             * for the next one we'll flush to the request
            cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
            cto->ct_header.rqs_entry_count = 1;
            cto->ct_header.rqs_flags = 0;
            cto->ct_scsi_status = 0;
            cto->ct_seg_count = 0;
            MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
    struct ccb_scsiio *csio;
    ct2_entry_t *cto, *qe;
    uint32_t curi, nxti;
    mp = (mush_t *) arg;
    csio = mp->cmd_token;
    curi = isp->isp_reqidx;
    qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
        if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
            isp_prt(isp, ISP_LOGWARN,
                "dma2_tgt_fc, a status CTIO2 without MODE1 "
                "set (0x%x)", cto->ct_flags);
         * We preserve ct_lun, ct_iid, ct_rxid. We set the data
         * flags to NO DATA and clear relative offset flags.
         * We preserve the ct_resid and the response area.
        cto->ct_header.rqs_seqno = 1;
        cto->ct_seg_count = 0;
        isp_prt(isp, ISP_LOGTDEBUG1,
            "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
            "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
            cto->ct_iid, cto->ct_flags, cto->ct_status,
            cto->rsp.m1.ct_scsi_status, cto->ct_resid);
        if (FCPARAM(isp)->isp_2klogin) {
                (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
            isp_put_ctio2(isp, cto, qe);
        ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
    if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
        isp_prt(isp, ISP_LOGERR,
            "dma2_tgt_fc, a data CTIO2 without MODE0 set "
            "(0x%x)", cto->ct_flags);
     * Check to see if we need DAC addressing or not.
     * Any address that's over the 4GB boundary causes this
    if (sizeof (bus_addr_t) > 4) {
        for (segcnt = 0; segcnt < nseg; segcnt++) {
            uint64_t addr = dm_segs[segcnt].ds_addr;
            if (addr >= 0x100000000LL) {
    if (segcnt != nseg) {
        cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
        seglim = ISP_RQDSEG_T3;
        ds64 = &cto->rsp.m0.u.ct_dataseg64[0];
        seglim = ISP_RQDSEG_T2;
        ds = &cto->rsp.m0.u.ct_dataseg[0];
    cto->ct_seg_count = 0;
     * Set up the CTIO2 data segments.
    for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
        cto->ct_seg_count++, segcnt++) {
            ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
            ds64->ds_base = dm_segs[segcnt].ds_addr;
            ds64->ds_count = dm_segs[segcnt].ds_len;
            ds->ds_base = dm_segs[segcnt].ds_addr;
            ds->ds_count = dm_segs[segcnt].ds_len;
        cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
#if __FreeBSD_version < 500000
        isp_prt(isp, ISP_LOGTDEBUG1,
            "isp_send_ctio2: ent0[%d]0x%llx:%llu",
            cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
            (uint64_t)dm_segs[segcnt].ds_len);
        isp_prt(isp, ISP_LOGTDEBUG1,
            "isp_send_ctio2: ent0[%d]0x%jx:%ju",
            cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
            (uintmax_t)dm_segs[segcnt].ds_len);
    while (segcnt < nseg) {
        ispcontreq_t local, *crq = &local, *qep;
        qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
        nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
        if (nxti == mp->optr) {
            isp_prt(isp, ISP_LOGTDEBUG0,
                "tdma_mkfc: request queue overflow");
            mp->error = MUSHERR_NOQENTRIES;
        cto->ct_header.rqs_entry_count++;
        MEMZERO((void *)crq, sizeof (*crq));
        crq->req_header.rqs_entry_count = 1;
        if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
            seglim = ISP_CDSEG64;
            ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
            crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
            ds = &crq->req_dataseg[0];
            crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
        for (seg = 0; segcnt < nseg && seg < seglim;
                ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
                ds64->ds_base = dm_segs[segcnt].ds_addr;
                ds64->ds_count = dm_segs[segcnt].ds_len;
                ds->ds_base = dm_segs[segcnt].ds_addr;
                ds->ds_count = dm_segs[segcnt].ds_len;
#if __FreeBSD_version < 500000
            isp_prt(isp, ISP_LOGTDEBUG1,
                "isp_send_ctio2: ent%d[%d]%llx:%llu",
                cto->ct_header.rqs_entry_count-1, seg,
                (uint64_t)dm_segs[segcnt].ds_addr,
                (uint64_t)dm_segs[segcnt].ds_len);
            isp_prt(isp, ISP_LOGTDEBUG1,
                "isp_send_ctio2: ent%d[%d]%jx:%ju",
                cto->ct_header.rqs_entry_count-1, seg,
                (uintmax_t)dm_segs[segcnt].ds_addr,
                (uintmax_t)dm_segs[segcnt].ds_len);
            cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
            cto->ct_seg_count++;
        MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
        isp_put_cont_req(isp, crq, qep);
        ISP_TDQE(isp, "cont entry", curi, qep);
     * Now do final twiddling for the CTIO itself.
2333 cto->ct_header.rqs_seqno = 1;
2334 isp_prt(isp, ISP_LOGTDEBUG1,
2335 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
2336 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
2337 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
2339 if (FCPARAM(isp)->isp_2klogin) {
2340 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
2342 isp_put_ctio2(isp, cto, qe);
2344 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
2349 static void dma_2400(void *, bus_dma_segment_t *, int, int);
2350 static void dma2_a64(void *, bus_dma_segment_t *, int, int);
2351 static void dma2(void *, bus_dma_segment_t *, int, int);
2354 dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2358 struct ccb_scsiio *csio;
2359 struct isp_pcisoftc *pcs;
2361 bus_dma_segment_t *eseg;
2363 int seglim, datalen;
2366 mp = (mush_t *) arg;
2373 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2378 csio = mp->cmd_token;
2381 pcs = (struct isp_pcisoftc *)mp->isp;
2382 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2385 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2386 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2388 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2390 datalen = XS_XFRLEN(csio);
2393 * We're passed an initial partially filled in entry that
2394 * has most fields filled in except for data transfer
2397 * Our job is to fill in the initial request queue entry and
2398 * then to start allocating and filling in continuation entries
2399 * until we've covered the entire transfer.
2402 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS;
2403 rq->req_dl = datalen;
2404 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2405 rq->req_alen_datadir = 0x2;
2407 rq->req_alen_datadir = 0x1;
2410 eseg = dm_segs + nseg;
2412 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr);
2413 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr);
2414 rq->req_dataseg.ds_count = dm_segs->ds_len;
2416 datalen -= dm_segs->ds_len;
2419 rq->req_seg_count++;
2421 while (datalen > 0 && dm_segs != eseg) {
2423 ispcontreq64_t local, *crq = &local, *cqe;
2425 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2427 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2428 if (nxti == mp->optr) {
2429 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2430 mp->error = MUSHERR_NOQENTRIES;
2433 rq->req_header.rqs_entry_count++;
2434 MEMZERO((void *)crq, sizeof (*crq));
2435 crq->req_header.rqs_entry_count = 1;
2436 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2439 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
2440 crq->req_dataseg[seglim].ds_base =
2441 DMA_LO32(dm_segs->ds_addr);
2442 crq->req_dataseg[seglim].ds_basehi =
2443 DMA_HI32(dm_segs->ds_addr);
2444 crq->req_dataseg[seglim].ds_count =
2446 rq->req_seg_count++;
2449 datalen -= dm_segs->ds_len;
2451 if (isp->isp_dblev & ISP_LOGDEBUG1) {
2452 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
2454 isp_put_cont64_req(isp, crq, cqe);
2455 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2461 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2465 struct ccb_scsiio *csio;
2466 struct isp_pcisoftc *pcs;
2468 bus_dma_segment_t *eseg;
2470 int seglim, datalen;
2473 mp = (mush_t *) arg;
2480 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2484 csio = mp->cmd_token;
2487 pcs = (struct isp_pcisoftc *)mp->isp;
2488 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2491 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2492 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2494 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2496 datalen = XS_XFRLEN(csio);
2499 * We're passed an initial partially filled in entry that
2500 * has most fields filled in except for data transfer
2503 * Our job is to fill in the initial request queue entry and
2504 * then to start allocating and filling in continuation entries
2505 * until we've covered the entire transfer.
	if (IS_FC(isp)) {
		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		rq->req_header.rqs_entry_type = RQSTYPE_A64;
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}
	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}
	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	uint32_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}

	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);
	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
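	/*
	 * This is the 32-bit variant of dma2_a64 above: segment
	 * addresses carry no ds_basehi word, and continuation entries
	 * are plain RQSTYPE_DATASEG entries holding ISP_CDSEG segments
	 * apiece.
	 */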
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}
	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}
	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
/*
 * We enter with ISP_LOCK held.
 */
static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    uint32_t *nxtip, uint32_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			ISPLOCK_2_CAMLOCK(isp);
			(*eptr)(mp, NULL, 0, 0);
			CAMLOCK_2_ISPLOCK(isp);
			goto mbxsync;
		}
	} else
#endif
	if (IS_24XX(isp)) {
		eptr = dma_2400;
	} else if (sizeof (bus_addr_t) > 4) {
		eptr = dma2_a64;
	} else {
		eptr = dma2;
	}
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;
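	/*
	 * The "grapevine": bus_dmamap_load() runs the eptr callback
	 * (synchronously here, since deferred loads are rejected below),
	 * and the mush_t is the callback's only channel for reporting
	 * consumed queue entries (through *nxtip) and errors (through
	 * mp->error) back to this function.
	 */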
	ISPLOCK_2_CAMLOCK(isp);
	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error;

			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
				mp->error = error;
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
			}
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	CAMLOCK_2_ISPLOCK(isp);
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	if (isp->isp_dblev & ISP_LOGDEBUG1) {
		isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq);
	}
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	case RQSTYPE_T7RQS:
		isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}
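/*
 * Illustrative caller contract (a sketch; the actual caller is
 * isp_start() via the ISP_DMASETUP macro):
 *
 *	switch (isp_pci_dmasetup(isp, csio, rq, &nxti, optr)) {
 *	case CMD_QUEUED:	// entry copied to the queue; advance
 *				// the in-pointer to nxti
 *	case CMD_EAGAIN:	// request queue full; retry later
 *	case CMD_COMPLETE:	// failed; CAM status already set
 *	}
 */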
static void
isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];

	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}
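/*
 * The POSTREAD/POSTWRITE syncs above pair with the PREREAD/PREWRITE
 * syncs issued in the dma callbacks, closing the busdma bracketing
 * around each transfer before the map is unloaded for reuse.
 */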
static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}
static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;

	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	/* Read all four bytes so both Command and Status are shown. */
	printf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));
}