3 * Copyright (c) 1997-2006 by Matthew Jacob
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice immediately at the beginning of the file, without modification,
11 * this list of conditions, and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * PCI-specific probe and attach routines for Qlogic ISP SCSI adapters.
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/module.h>
39 #if __FreeBSD_version >= 700000
40 #include <sys/linker.h>
41 #include <sys/firmware.h>
44 #if __FreeBSD_version < 500000
45 #include <pci/pcireg.h>
46 #include <pci/pcivar.h>
47 #include <machine/bus_memio.h>
48 #include <machine/bus_pio.h>
50 #include <sys/stdint.h>
51 #include <dev/pci/pcireg.h>
52 #include <dev/pci/pcivar.h>
54 #include <machine/bus.h>
55 #include <machine/resource.h>
57 #include <sys/malloc.h>
59 #include <dev/isp/isp_freebsd.h>
61 #if __FreeBSD_version < 500000
62 #define BUS_PROBE_DEFAULT 0
65 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
66 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
67 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
68 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
69 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
70 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
72 isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
74 isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
76 isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
77 static int isp_pci_mbxdma(ispsoftc_t *);
79 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t);
81 isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t);
84 static void isp_pci_reset0(ispsoftc_t *);
85 static void isp_pci_reset1(ispsoftc_t *);
86 static void isp_pci_dumpregs(ispsoftc_t *, const char *);
88 static struct ispmdvec mdvec = {
99 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
102 static struct ispmdvec mdvec_1080 = {
113 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
116 static struct ispmdvec mdvec_12160 = {
127 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
130 static struct ispmdvec mdvec_2100 = {
142 static struct ispmdvec mdvec_2200 = {
154 static struct ispmdvec mdvec_2300 = {
166 static struct ispmdvec mdvec_2400 = {
178 #ifndef PCIM_CMD_INVEN
179 #define PCIM_CMD_INVEN 0x10
181 #ifndef PCIM_CMD_BUSMASTEREN
182 #define PCIM_CMD_BUSMASTEREN 0x0004
184 #ifndef PCIM_CMD_PERRESPEN
185 #define PCIM_CMD_PERRESPEN 0x0040
187 #ifndef PCIM_CMD_SEREN
188 #define PCIM_CMD_SEREN 0x0100
190 #ifndef PCIM_CMD_INTX_DISABLE
191 #define PCIM_CMD_INTX_DISABLE 0x0400
195 #define PCIR_COMMAND 0x04
198 #ifndef PCIR_CACHELNSZ
199 #define PCIR_CACHELNSZ 0x0c
202 #ifndef PCIR_LATTIMER
203 #define PCIR_LATTIMER 0x0d
207 #define PCIR_ROMADDR 0x30
210 #ifndef PCI_VENDOR_QLOGIC
211 #define PCI_VENDOR_QLOGIC 0x1077
214 #ifndef PCI_PRODUCT_QLOGIC_ISP1020
215 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
218 #ifndef PCI_PRODUCT_QLOGIC_ISP1080
219 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
222 #ifndef PCI_PRODUCT_QLOGIC_ISP10160
223 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
226 #ifndef PCI_PRODUCT_QLOGIC_ISP12160
227 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
230 #ifndef PCI_PRODUCT_QLOGIC_ISP1240
231 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
234 #ifndef PCI_PRODUCT_QLOGIC_ISP1280
235 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
238 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
239 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
242 #ifndef PCI_PRODUCT_QLOGIC_ISP2200
243 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
246 #ifndef PCI_PRODUCT_QLOGIC_ISP2300
247 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
250 #ifndef PCI_PRODUCT_QLOGIC_ISP2312
251 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
254 #ifndef PCI_PRODUCT_QLOGIC_ISP2322
255 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
258 #ifndef PCI_PRODUCT_QLOGIC_ISP2422
259 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
262 #ifndef PCI_PRODUCT_QLOGIC_ISP2432
263 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432
266 #ifndef PCI_PRODUCT_QLOGIC_ISP6312
267 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
270 #ifndef PCI_PRODUCT_QLOGIC_ISP6322
271 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
275 #define PCI_QLOGIC_ISP1020 \
276 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
278 #define PCI_QLOGIC_ISP1080 \
279 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
281 #define PCI_QLOGIC_ISP10160 \
282 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
284 #define PCI_QLOGIC_ISP12160 \
285 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
287 #define PCI_QLOGIC_ISP1240 \
288 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
290 #define PCI_QLOGIC_ISP1280 \
291 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
293 #define PCI_QLOGIC_ISP2100 \
294 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
296 #define PCI_QLOGIC_ISP2200 \
297 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
299 #define PCI_QLOGIC_ISP2300 \
300 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
302 #define PCI_QLOGIC_ISP2312 \
303 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
305 #define PCI_QLOGIC_ISP2322 \
306 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
308 #define PCI_QLOGIC_ISP2422 \
309 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
311 #define PCI_QLOGIC_ISP2432 \
312 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
314 #define PCI_QLOGIC_ISP6312 \
315 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
317 #define PCI_QLOGIC_ISP6322 \
318 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
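/*
 * Composite PCI IDs: the 16-bit device ID sits in the upper half and the
 * QLogic vendor ID (0x1077) in the lower half, matching the value returned
 * by pci_get_devid() and the (device << 16) | vendor key that
 * isp_pci_probe() switches on below.
 */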
321 * Odd case for some AMI raid cards... We need to *not* attach to this.
323 #define AMI_RAID_SUBVENDOR_ID 0x101e
325 #define IO_MAP_REG 0x10
326 #define MEM_MAP_REG 0x14
328 #define PCI_DFLT_LTNCY 0x40
329 #define PCI_DFLT_LNSZ 0x10
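/*
 * Defaults written to the PCI latency timer and cache line size registers
 * by isp_pci_attach() when the values already there look unreasonable.
 */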
331 static int isp_pci_probe (device_t);
332 static int isp_pci_attach (device_t);
333 static int isp_pci_detach (device_t);
336 struct isp_pcisoftc {
339 struct resource * pci_reg;
340 bus_space_tag_t pci_st;
341 bus_space_handle_t pci_sh;
343 int16_t pci_poff[_NREG_BLKS];
349 static device_method_t isp_pci_methods[] = {
350 /* Device interface */
351 DEVMETHOD(device_probe, isp_pci_probe),
352 DEVMETHOD(device_attach, isp_pci_attach),
353 DEVMETHOD(device_detach, isp_pci_detach),
356 static void isp_pci_intr(void *);
358 static driver_t isp_pci_driver = {
359 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
361 static devclass_t isp_devclass;
362 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
363 #if __FreeBSD_version < 700000
364 extern ispfwfunc *isp_get_firmware_p;
368 isp_pci_probe(device_t dev)
370 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
371 case PCI_QLOGIC_ISP1020:
372 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
374 case PCI_QLOGIC_ISP1080:
375 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
377 case PCI_QLOGIC_ISP1240:
378 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
380 case PCI_QLOGIC_ISP1280:
381 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
383 case PCI_QLOGIC_ISP10160:
384 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
386 case PCI_QLOGIC_ISP12160:
387 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
390 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
392 case PCI_QLOGIC_ISP2100:
393 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
395 case PCI_QLOGIC_ISP2200:
396 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
398 case PCI_QLOGIC_ISP2300:
399 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
401 case PCI_QLOGIC_ISP2312:
402 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
404 case PCI_QLOGIC_ISP2322:
405 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
407 case PCI_QLOGIC_ISP2422:
408 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
410 case PCI_QLOGIC_ISP2432:
411 device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
413 case PCI_QLOGIC_ISP6312:
414 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
416 case PCI_QLOGIC_ISP6322:
417 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
422 if (isp_announced == 0 && bootverbose) {
423 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
424 "Core Version %d.%d\n",
425 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
426 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
430 * XXXX: Here is where we might load the f/w module
431 * XXXX: (or increase a reference count to it).
433 return (BUS_PROBE_DEFAULT);
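/*
 * isp_get_options gathers per-unit configuration before the card is set up.
 * On pre-5.0 systems this is done with kernel environment integers keyed by
 * unit bitmaps (isp_disable, isp_no_fwload, isp_no_nvram, ...); on newer
 * systems the equivalent hints are read with resource_*_value() further down.
 */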
436 #if __FreeBSD_version < 500000
438 isp_get_options(device_t dev, ispsoftc_t *isp)
443 callout_handle_init(&isp->isp_osinfo.ldt);
444 callout_handle_init(&isp->isp_osinfo.gdt);
446 unit = device_get_unit(dev);
447 if (getenv_int("isp_disable", &bitmap)) {
448 if (bitmap & (1 << unit)) {
449 isp->isp_osinfo.disabled = 1;
454 if (getenv_int("isp_no_fwload", &bitmap)) {
455 if (bitmap & (1 << unit))
456 isp->isp_confopts |= ISP_CFG_NORELOAD;
458 if (getenv_int("isp_fwload", &bitmap)) {
459 if (bitmap & (1 << unit))
460 isp->isp_confopts &= ~ISP_CFG_NORELOAD;
462 if (getenv_int("isp_no_nvram", &bitmap)) {
463 if (bitmap & (1 << unit))
464 isp->isp_confopts |= ISP_CFG_NONVRAM;
466 if (getenv_int("isp_nvram", &bitmap)) {
467 if (bitmap & (1 << unit))
468 isp->isp_confopts &= ~ISP_CFG_NONVRAM;
470 if (getenv_int("isp_fcduplex", &bitmap)) {
471 if (bitmap & (1 << unit))
472 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
474 if (getenv_int("isp_no_fcduplex", &bitmap)) {
475 if (bitmap & (1 << unit))
476 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
478 if (getenv_int("isp_nport", &bitmap)) {
479 if (bitmap & (1 << unit))
480 isp->isp_confopts |= ISP_CFG_NPORT;
484 * Because the resource_*_value functions can neither return
485 * 64 bit integer values, nor can they be directly coerced
486 * to interpret the right hand side of the assignment as
487 * you want them to interpret it, we have to force WWN
488 * hint replacement to specify WWN strings with a leading
489 * 'w' (e.g. w50000000aaaa0001). Sigh.
491 if (getenv_quad("isp_portwwn", &wwn)) {
492 isp->isp_osinfo.default_port_wwn = wwn;
493 isp->isp_confopts |= ISP_CFG_OWNWWPN;
495 if (isp->isp_osinfo.default_port_wwn == 0) {
496 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
499 if (getenv_quad("isp_nodewwn", &wwn)) {
500 isp->isp_osinfo.default_node_wwn = wwn;
501 isp->isp_confopts |= ISP_CFG_OWNWWNN;
503 if (isp->isp_osinfo.default_node_wwn == 0) {
504 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
508 (void) getenv_int("isp_debug", &bitmap);
510 isp->isp_dblev = bitmap;
512 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
515 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
519 (void) getenv_int("isp_fabric_hysteresis", &bitmap);
520 if (bitmap >= 0 && bitmap < 256) {
521 isp->isp_osinfo.hysteresis = bitmap;
523 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
527 (void) getenv_int("isp_loop_down_limit", &bitmap);
528 if (bitmap >= 0 && bitmap < 0xffff) {
529 isp->isp_osinfo.loop_down_limit = bitmap;
531 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
535 (void) getenv_int("isp_gone_device_time", &bitmap);
536 if (bitmap >= 0 && bitmap < 0xffff) {
537 isp->isp_osinfo.gone_device_time = bitmap;
539 isp->isp_osinfo.gone_device_time = isp_gone_device_time;
543 #ifdef ISP_FW_CRASH_DUMP
545 if (getenv_int("isp_fw_dump_enable", &bitmap)) {
546 if (bitmap & (1 << unit)) {
549 amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
550 } else if (IS_23XX(isp)) {
551 amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
554 FCPARAM(isp)->isp_dump_data =
555 malloc(amt, M_DEVBUF, M_WAITOK);
556 memset(FCPARAM(isp)->isp_dump_data, 0, amt);
559 "f/w crash dumps not supported for card\n");
565 if (getenv_int("role", &bitmap)) {
566 isp->isp_role = bitmap;
568 isp->isp_role = ISP_DEFAULT_ROLES;
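/*
 * isp_get_pci_options only decides which BAR to try first: m1 is the
 * preferred mapping (memory by default) and m2 the fallback, expressed as
 * the PCIM_CMD_MEMEN/PCIM_CMD_PORTEN command-register bits that must be
 * enabled for that mapping to be usable.
 */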
573 isp_get_pci_options(device_t dev, int *m1, int *m2)
576 int unit = device_get_unit(dev);
578 *m1 = PCIM_CMD_MEMEN;
579 *m2 = PCIM_CMD_PORTEN;
580 if (getenv_int("isp_mem_map", &bitmap)) {
581 if (bitmap & (1 << unit)) {
582 *m1 = PCIM_CMD_MEMEN;
583 *m2 = PCIM_CMD_PORTEN;
587 if (getenv_int("isp_io_map", &bitmap)) {
588 if (bitmap & (1 << unit)) {
589 *m1 = PCIM_CMD_PORTEN;
590 *m2 = PCIM_CMD_MEMEN;
596 isp_get_options(device_t dev, ispsoftc_t *isp)
601 callout_handle_init(&isp->isp_osinfo.ldt);
602 callout_handle_init(&isp->isp_osinfo.gdt);
605 * Figure out if we're supposed to skip this one.
609 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
610 "disable", &tval) == 0 && tval) {
611 device_printf(dev, "disabled at user request\n");
612 isp->isp_osinfo.disabled = 1;
617 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
618 "role", &tval) == 0 && tval != -1) {
619 tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
620 isp->isp_role = tval;
621 device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
623 #ifdef ISP_TARGET_MODE
624 isp->isp_role = ISP_ROLE_TARGET;
626 isp->isp_role = ISP_DEFAULT_ROLES;
631 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
632 "fwload_disable", &tval) == 0 && tval != 0) {
633 isp->isp_confopts |= ISP_CFG_NORELOAD;
636 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
637 "ignore_nvram", &tval) == 0 && tval != 0) {
638 isp->isp_confopts |= ISP_CFG_NONVRAM;
641 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
642 "fullduplex", &tval) == 0 && tval != 0) {
643 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
645 #ifdef ISP_FW_CRASH_DUMP
647 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
648 "fw_dump_enable", &tval) == 0 && tval != 0) {
651 amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
652 } else if (IS_23XX(isp)) {
653 amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
656 FCPARAM(isp)->isp_dump_data =
657 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
660 "f/w crash dumps not supported for this model\n");
666 if (resource_string_value(device_get_name(dev), device_get_unit(dev),
667 "topology", (const char **) &sptr) == 0 && sptr != 0) {
668 if (strcmp(sptr, "lport") == 0) {
669 isp->isp_confopts |= ISP_CFG_LPORT;
670 } else if (strcmp(sptr, "nport") == 0) {
671 isp->isp_confopts |= ISP_CFG_NPORT;
672 } else if (strcmp(sptr, "lport-only") == 0) {
673 isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
674 } else if (strcmp(sptr, "nport-only") == 0) {
675 isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
680 * Because the resource_*_value functions can neither return
681 * 64 bit integer values, nor can they be directly coerced
682 * to interpret the right hand side of the assignment as
683 * you want them to interpret it, we have to force WWN
684 * hint replacement to specify WWN strings with a leading
685 * 'w' (e.g. w50000000aaaa0001). Sigh.
688 tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
689 "portwwn", (const char **) &sptr);
690 if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
692 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
693 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
694 device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
695 isp->isp_osinfo.default_port_wwn = 0;
697 isp->isp_confopts |= ISP_CFG_OWNWWPN;
700 if (isp->isp_osinfo.default_port_wwn == 0) {
701 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
705 tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
706 "nodewwn", (const char **) &sptr);
707 if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
709 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
710 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
711 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
712 isp->isp_osinfo.default_node_wwn = 0;
714 isp->isp_confopts |= ISP_CFG_OWNWWNN;
717 if (isp->isp_osinfo.default_node_wwn == 0) {
718 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
721 isp->isp_osinfo.default_id = -1;
722 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
723 "iid", &tval) == 0) {
724 isp->isp_osinfo.default_id = tval;
725 isp->isp_confopts |= ISP_CFG_OWNLOOPID;
727 if (isp->isp_osinfo.default_id == -1) {
729 isp->isp_osinfo.default_id = 109;
731 isp->isp_osinfo.default_id = 7;
736 * Set up logging levels.
739 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
742 isp->isp_dblev = tval;
744 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
747 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
751 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
752 "hysteresis", &tval);
753 if (tval >= 0 && tval < 256) {
754 isp->isp_osinfo.hysteresis = tval;
756 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis;
760 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
761 "loop_down_limit", &tval);
762 if (tval >= 0 && tval < 0xffff) {
763 isp->isp_osinfo.loop_down_limit = tval;
765 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit;
769 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
770 "gone_device_time", &tval);
771 if (tval >= 0 && tval < 0xffff) {
772 isp->isp_osinfo.gone_device_time = tval;
774 isp->isp_osinfo.gone_device_time = isp_gone_device_time;
779 isp_get_pci_options(device_t dev, int *m1, int *m2)
783 * Which we should try first - memory mapping or i/o mapping?
785 * We used to try memory first followed by i/o on alpha, otherwise
786 * the reverse, but we should just try memory first all the time now.
788 *m1 = PCIM_CMD_MEMEN;
789 *m2 = PCIM_CMD_PORTEN;
792 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
793 "prefer_iomap", &tval) == 0 && tval != 0) {
794 *m1 = PCIM_CMD_PORTEN;
795 *m2 = PCIM_CMD_MEMEN;
798 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
799 "prefer_memmap", &tval) == 0 && tval != 0) {
800 *m1 = PCIM_CMD_MEMEN;
801 *m2 = PCIM_CMD_PORTEN;
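/*
 * Attach: map registers (memory or I/O space per the preference above),
 * pick the ispmdvec and parameter area size for the specific chip, locate
 * firmware, fix up the PCI command/cache-line/latency registers, hook the
 * interrupt, and then reset and initialize the core, checking isp_state
 * after each step.
 */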
807 isp_pci_attach(device_t dev)
809 struct resource *regs, *irq;
810 int rtp, rgd, iqd, m1, m2;
811 uint32_t data, cmd, linesz, psize, basetype;
812 struct isp_pcisoftc *pcs;
813 ispsoftc_t *isp = NULL;
814 struct ispmdvec *mdvp;
815 #if __FreeBSD_version >= 500000
819 pcs = device_get_softc(dev);
821 device_printf(dev, "cannot get softc\n");
824 memset(pcs, 0, sizeof (*pcs));
829 * Set and Get Generic Options
831 isp_get_options(dev, isp);
834 * Check to see if options have us disabled
836 if (isp->isp_osinfo.disabled) {
838 * But return zero to preserve unit numbering
844 * Get PCI options- which in this case are just mapping preferences.
846 isp_get_pci_options(dev, &m1, &m2);
848 linesz = PCI_DFLT_LNSZ;
852 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
854 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
855 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
856 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
858 if (regs == NULL && (cmd & m2)) {
859 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
860 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
861 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
864 device_printf(dev, "unable to map any ports\n");
868 device_printf(dev, "using %s space register mapping\n",
869 (rgd == IO_MAP_REG)? "I/O" : "Memory");
873 pcs->pci_st = rman_get_bustag(regs);
874 pcs->pci_sh = rman_get_bushandle(regs);
876 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
877 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
878 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
879 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
880 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
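/*
 * pci_poff[] maps each virtual register block (BIU, mailbox, SXP, RISC,
 * DMA) to its offset within the PCI register window; the chip-specific
 * cases below override the mailbox and DMA offsets where newer parts
 * moved them.
 */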
882 basetype = ISP_HA_SCSI_UNKNOWN;
883 psize = sizeof (sdparam);
884 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
886 basetype = ISP_HA_SCSI_UNKNOWN;
887 psize = sizeof (sdparam);
889 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
891 basetype = ISP_HA_SCSI_1080;
892 psize = sizeof (sdparam);
893 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
894 ISP1080_DMA_REGS_OFF;
896 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
898 basetype = ISP_HA_SCSI_1240;
899 psize = 2 * sizeof (sdparam);
900 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
901 ISP1080_DMA_REGS_OFF;
903 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
905 basetype = ISP_HA_SCSI_1280;
906 psize = 2 * sizeof (sdparam);
907 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
908 ISP1080_DMA_REGS_OFF;
910 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
912 basetype = ISP_HA_SCSI_10160;
913 psize = sizeof (sdparam);
914 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
915 ISP1080_DMA_REGS_OFF;
917 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
919 basetype = ISP_HA_SCSI_12160;
920 psize = 2 * sizeof (sdparam);
921 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
922 ISP1080_DMA_REGS_OFF;
924 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
926 basetype = ISP_HA_FC_2100;
927 psize = sizeof (fcparam);
928 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
929 PCI_MBOX_REGS2100_OFF;
930 if (pci_get_revid(dev) < 3) {
932 * XXX: Need to get the actual revision
933 * XXX: number of the 2100 FB. At any rate,
934 * XXX: lower cache line size for early revision
940 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
942 basetype = ISP_HA_FC_2200;
943 psize = sizeof (fcparam);
944 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
945 PCI_MBOX_REGS2100_OFF;
947 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
949 basetype = ISP_HA_FC_2300;
950 psize = sizeof (fcparam);
951 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
952 PCI_MBOX_REGS2300_OFF;
954 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
955 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
957 basetype = ISP_HA_FC_2312;
958 psize = sizeof (fcparam);
959 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
960 PCI_MBOX_REGS2300_OFF;
962 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
963 pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
965 basetype = ISP_HA_FC_2322;
966 psize = sizeof (fcparam);
967 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
968 PCI_MBOX_REGS2300_OFF;
970 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422 ||
971 pci_get_devid(dev) == PCI_QLOGIC_ISP2432) {
973 basetype = ISP_HA_FC_2400;
974 psize = sizeof (fcparam);
975 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
976 PCI_MBOX_REGS2400_OFF;
979 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
980 if (isp->isp_param == NULL) {
981 device_printf(dev, "cannot allocate parameter data\n");
984 isp->isp_mdvec = mdvp;
985 isp->isp_type = basetype;
986 isp->isp_revision = pci_get_revid(dev);
989 #if __FreeBSD_version >= 700000
991 * Try and find firmware for this device.
995 unsigned int did = pci_get_device(dev);
998 * Map a few pci ids to fw names
1001 case PCI_PRODUCT_QLOGIC_ISP1020:
1004 case PCI_PRODUCT_QLOGIC_ISP1240:
1007 case PCI_PRODUCT_QLOGIC_ISP10160:
1008 case PCI_PRODUCT_QLOGIC_ISP12160:
1011 case PCI_PRODUCT_QLOGIC_ISP6312:
1012 case PCI_PRODUCT_QLOGIC_ISP2312:
1015 case PCI_PRODUCT_QLOGIC_ISP6322:
1018 case PCI_PRODUCT_QLOGIC_ISP2422:
1019 case PCI_PRODUCT_QLOGIC_ISP2432:
1026 isp->isp_osinfo.fw = NULL;
1027 if (isp->isp_role & ISP_ROLE_TARGET) {
1028 snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
1029 isp->isp_osinfo.fw = firmware_get(fwname);
1031 if (isp->isp_osinfo.fw == NULL) {
1032 snprintf(fwname, sizeof (fwname), "isp_%04x", did);
1033 isp->isp_osinfo.fw = firmware_get(fwname);
1035 if (isp->isp_osinfo.fw != NULL) {
1040 u.fred = isp->isp_osinfo.fw->data;
1041 isp->isp_mdvec->dv_ispfw = u.bob;
1045 if (isp_get_firmware_p) {
1046 int device = (int) pci_get_device(dev);
1047 #ifdef ISP_TARGET_MODE
1048 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
1050 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
1056 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
1059 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
1060 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
1062 if (IS_2300(isp)) { /* per QLogic errata */
1063 cmd &= ~PCIM_CMD_INVEN;
1066 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
1067 cmd &= ~PCIM_CMD_INTX_DISABLE;
1070 #ifdef WE_KNEW_WHAT_WE_WERE_DOING
1074 cmd &= ~PCIM_CMD_INTX_DISABLE;
1077 * Is this a PCI-X card? If so, set max read byte count.
1079 if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0) {
1083 pxcmd = pci_read_config(dev, reg, 2);
1086 pci_write_config(dev, reg, 2, pxcmd);
1090 * Is this a PCI Express card? If so, set max read byte count.
1092 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
1096 pectl = pci_read_config(dev, reg, 2);
1099 pci_write_config(dev, reg, 2, pectl);
1104 cmd &= ~PCIM_CMD_INTX_DISABLE;
1108 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
1111 * Make sure the Cache Line Size register is set sensibly.
1113 data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
1114 if (data != linesz) {
1115 data = PCI_DFLT_LNSZ;
1116 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
1117 pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
1121 * Make sure the Latency Timer is sane.
1123 data = pci_read_config(dev, PCIR_LATTIMER, 1);
1124 if (data < PCI_DFLT_LTNCY) {
1125 data = PCI_DFLT_LTNCY;
1126 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
1127 pci_write_config(dev, PCIR_LATTIMER, data, 1);
1131 * Make sure we've disabled the ROM.
1133 data = pci_read_config(dev, PCIR_ROMADDR, 4);
1135 pci_write_config(dev, PCIR_ROMADDR, data, 4);
1138 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
1139 RF_ACTIVE | RF_SHAREABLE);
1141 device_printf(dev, "could not allocate interrupt\n");
1145 #if __FreeBSD_version >= 500000
1146 /* Make sure the lock is set up. */
1147 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
1151 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
1152 device_printf(dev, "could not setup interrupt\n");
1157 * Last minute checks...
1159 if (IS_23XX(isp) || IS_24XX(isp)) {
1160 isp->isp_port = pci_get_function(dev);
1165 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
1167 isp->isp_touched = 1;
1171 * Make sure we're in reset state.
1175 if (isp->isp_state != ISP_RESETSTATE) {
1180 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
1186 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
1192 * XXXX: Here is where we might unload the f/w module
1193 * XXXX: (or decrease the reference count to it).
1201 if (pcs && pcs->ih) {
1202 (void) bus_teardown_intr(dev, irq, pcs->ih);
1205 #if __FreeBSD_version >= 500000
1206 if (locksetup && isp) {
1207 mtx_destroy(&isp->isp_osinfo.lock);
1212 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
1217 (void) bus_release_resource(dev, rtp, rgd, regs);
1221 if (pcs->pci_isp.isp_param) {
1222 #ifdef ISP_FW_CRASH_DUMP
1223 if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
1224 free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
1227 free(pcs->pci_isp.isp_param, M_DEVBUF);
1232 * XXXX: Here is where we might unload the f/w module
1233 * XXXX: (or decrease the reference count to it).
1239 isp_pci_detach(device_t dev)
1241 struct isp_pcisoftc *pcs;
1244 pcs = device_get_softc(dev);
1248 isp = (ispsoftc_t *) pcs;
1249 ISP_DISABLE_INTS(isp);
1254 isp_pci_intr(void *arg)
1256 ispsoftc_t *isp = arg;
1258 uint16_t sema, mbox;
1262 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
1263 isp->isp_intbogus++;
1265 isp_intr(isp, isr, sema, mbox);
1271 #define IspVirt2Off(a, x) \
1272 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
1273 _BLK_REG_SHFT] + ((x) & 0xfff))
1275 #define BXR2(pcs, off) \
1276 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
1277 #define BXW2(pcs, off, v) \
1278 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
1279 #define BXR4(pcs, off) \
1280 bus_space_read_4(pcs->pci_st, pcs->pci_sh, off)
1281 #define BXW4(pcs, off, v) \
1282 bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v)
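/*
 * BXR/BXW are shorthand for 16- and 32-bit bus_space accesses through the
 * mapped register window. isp_pci_rd_debounced() below rereads a register
 * until two successive reads agree (or it gives up after 1000 tries),
 * presumably to avoid acting on a value sampled mid-change.
 */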
1286 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
1288 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1289 uint32_t val0, val1;
1293 val0 = BXR2(pcs, IspVirt2Off(isp, off));
1294 val1 = BXR2(pcs, IspVirt2Off(isp, off));
1295 } while (val0 != val1 && ++i < 1000);
1304 isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp,
1305 uint16_t *semap, uint16_t *mbp)
1307 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1311 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
1314 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
1318 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
1319 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
1321 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1322 isr &= INT_PENDING_MASK(isp);
1323 sema &= BIU_SEMA_LOCK;
1324 if (isr == 0 && sema == 0) {
1328 if ((*semap = sema) != 0) {
1330 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
1334 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
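/*
 * The 2300 and later parts collapse interrupt status into a single
 * RISC-to-host status register: the low 16 bits identify the interrupt
 * source and the high 16 bits carry OUTMAILBOX0, so one 32-bit read
 * replaces the separate ISR/SEMA/mailbox reads done above.
 */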
1341 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp,
1342 uint16_t *semap, uint16_t *mbox0p)
1344 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1348 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
1352 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU_R2HSTSLO));
1353 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1354 if ((r2hisr & BIU_R2HST_INTR) == 0) {
1358 switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
1359 case ISPR2HST_ROM_MBX_OK:
1360 case ISPR2HST_ROM_MBX_FAIL:
1361 case ISPR2HST_MBX_OK:
1362 case ISPR2HST_MBX_FAIL:
1363 case ISPR2HST_ASYNC_EVENT:
1364 *isrp = r2hisr & 0xffff;
1365 *mbox0p = (r2hisr >> 16);
1368 case ISPR2HST_RIO_16:
1369 *isrp = r2hisr & 0xffff;
1370 *mbox0p = ASYNC_RIO1;
1373 case ISPR2HST_FPOST:
1374 *isrp = r2hisr & 0xffff;
1375 *mbox0p = ASYNC_CMD_CMPLT;
1378 case ISPR2HST_FPOST_CTIO:
1379 *isrp = r2hisr & 0xffff;
1380 *mbox0p = ASYNC_CTIO_DONE;
1383 case ISPR2HST_RSPQ_UPDATE:
1384 *isrp = r2hisr & 0xffff;
1389 hccr = ISP_READ(isp, HCCR);
1390 if (hccr & HCCR_PAUSE) {
1391 ISP_WRITE(isp, HCCR, HCCR_RESET);
1392 isp_prt(isp, ISP_LOGERR,
1393 "RISC paused at interrupt (%x->%x)", hccr,
1394 ISP_READ(isp, HCCR));
1395 ISP_WRITE(isp, BIU_ICR, 0);
1397 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n",
1405 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp,
1406 uint16_t *semap, uint16_t *mbox0p)
1408 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1411 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO));
1412 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1413 if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
1417 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
1418 case ISP2400R2HST_ROM_MBX_OK:
1419 case ISP2400R2HST_ROM_MBX_FAIL:
1420 case ISP2400R2HST_MBX_OK:
1421 case ISP2400R2HST_MBX_FAIL:
1422 case ISP2400R2HST_ASYNC_EVENT:
1423 *isrp = r2hisr & 0xffff;
1424 *mbox0p = (r2hisr >> 16);
1427 case ISP2400R2HST_RSPQ_UPDATE:
1428 case ISP2400R2HST_ATIO_RSPQ_UPDATE:
1429 case ISP2400R2HST_ATIO_RQST_UPDATE:
1430 *isrp = r2hisr & 0xffff;
1435 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
1436 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
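/*
 * Plain register accessors: SXP block registers are only reachable after
 * setting BIU_PCI_CONF1_SXP in the configuration register, so accesses to
 * that block temporarily flip the bit and then restore the old CONF1
 * contents. The RISC processor is assumed to be paused by the caller.
 */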
1442 isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
1445 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1448 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1450 * We will assume that someone has paused the RISC processor.
1452 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1453 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1454 oldconf | BIU_PCI_CONF1_SXP);
1456 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1457 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1458 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1464 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
1466 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1470 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1472 * We will assume that someone has paused the RISC processor.
1474 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1475 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1476 oldconf | BIU_PCI_CONF1_SXP);
1478 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1481 BXW2(pcs, IspVirt2Off(isp, regoff), val);
1483 junk = BXR2(pcs, IspVirt2Off(isp, regoff));
1485 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1486 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1488 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
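/*
 * The 1080/1280/12160 parts multiplex the SXP banks and DMA registers
 * behind CONF1: the accessors below clear the DMA select bit, choose SXP
 * bank 0 or 1 (or set the DMA bit for DMA_BLOCK), do the access, and
 * restore the original CONF1 value.
 */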
1494 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
1496 uint32_t rv, oc = 0;
1497 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1499 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1500 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1503 * We will assume that someone has paused the RISC processor.
1505 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1506 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1507 if (regoff & SXP_BANK1_SELECT)
1508 tc |= BIU_PCI1080_CONF1_SXP1;
1510 tc |= BIU_PCI1080_CONF1_SXP0;
1511 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1512 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1513 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1514 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1515 oc | BIU_PCI1080_CONF1_DMA);
1517 rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1519 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1525 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
1527 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1531 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1532 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1535 * We will assume that someone has paused the RISC processor.
1537 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1538 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1539 if (regoff & SXP_BANK1_SELECT)
1540 tc |= BIU_PCI1080_CONF1_SXP1;
1542 tc |= BIU_PCI1080_CONF1_SXP0;
1543 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1544 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1545 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1546 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1547 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1548 oc | BIU_PCI1080_CONF1_DMA);
1549 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1551 BXW2(pcs, IspVirt2Off(isp, regoff), val);
1552 junk = BXR2(pcs, IspVirt2Off(isp, regoff));
1554 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1555 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1560 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
1562 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1564 int block = regoff & _BLK_REG_MASK;
1570 return (BXR2(pcs, IspVirt2Off(pcs, regoff)));
1572 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
1573 return (0xffffffff);
1575 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
1576 return (0xffffffff);
1578 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
1579 return (0xffffffff);
1581 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
1582 return (0xffffffff);
1587 case BIU2400_FLASH_ADDR:
1588 case BIU2400_FLASH_DATA:
1592 case BIU2400_REQINP:
1593 case BIU2400_REQOUTP:
1594 case BIU2400_RSPINP:
1595 case BIU2400_RSPOUTP:
1596 case BIU2400_PRI_RQINP:
1597 case BIU2400_PRI_RSPINP:
1598 case BIU2400_ATIO_RSPINP:
1599 case BIU2400_ATIO_REQINP:
1604 rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
1606 case BIU2400_R2HSTSLO:
1607 rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
1609 case BIU2400_R2HSTSHI:
1610 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16;
1613 isp_prt(isp, ISP_LOGERR,
1614 "isp_pci_rd_reg_2400: unknown offset %x", regoff);
1622 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
1624 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1625 int block = regoff & _BLK_REG_MASK;
1632 BXW2(pcs, IspVirt2Off(pcs, regoff), val);
1633 junk = BXR2(pcs, IspVirt2Off(pcs, regoff));
1636 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
1639 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
1642 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
1645 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
1651 case BIU2400_FLASH_ADDR:
1652 case BIU2400_FLASH_DATA:
1656 case BIU2400_REQINP:
1657 case BIU2400_REQOUTP:
1658 case BIU2400_RSPINP:
1659 case BIU2400_RSPOUTP:
1660 case BIU2400_PRI_RQINP:
1661 case BIU2400_PRI_RSPINP:
1662 case BIU2400_ATIO_RSPINP:
1663 case BIU2400_ATIO_REQINP:
1668 BXW4(pcs, IspVirt2Off(pcs, regoff), val);
1669 junk = BXR4(pcs, IspVirt2Off(pcs, regoff));
1672 isp_prt(isp, ISP_LOGERR,
1673 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
1684 static void imc(void *, bus_dma_segment_t *, int, int);
1687 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1689 struct imush *imushp = (struct imush *) arg;
1691 imushp->error = error;
1693 ispsoftc_t *isp = imushp->isp;
1694 bus_addr_t addr = segs->ds_addr;
1696 isp->isp_rquest_dma = addr;
1697 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1698 isp->isp_result_dma = addr;
1700 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1701 FCPARAM(isp)->isp_scdma = addr;
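/*
 * imc() is the bus_dmamap_load() callback for the control area: it records
 * the DMA addresses of the request queue, the result queue, and (for FC
 * cards) the scratch area, all of which live in one contiguous allocation.
 */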
1707 isp_pci_mbxdma(ispsoftc_t *isp)
1709 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1713 bus_size_t slim; /* segment size */
1714 bus_addr_t llim; /* low limit of unavailable dma */
1715 bus_addr_t hlim; /* high limit of unavailable dma */
1719 * Already been here? If so, leave...
1721 if (isp->isp_rquest) {
1725 if (isp->isp_maxcmds == 0) {
1726 isp_prt(isp, ISP_LOGERR, "maxcmds not set");
1730 hlim = BUS_SPACE_MAXADDR;
1731 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1732 slim = (bus_size_t) (1ULL << 32);
1733 llim = BUS_SPACE_MAXADDR;
1735 llim = BUS_SPACE_MAXADDR_32BIT;
1740 * XXX: We don't really support 64 bit target mode for parallel scsi yet
1742 #ifdef ISP_TARGET_MODE
1743 if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
1744 isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
1750 if (isp_dma_tag_create(BUS_DMA_ROOTARG(pcs->pci_dev), 1, slim, llim,
1751 hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
1753 isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1759 len = sizeof (XS_T **) * isp->isp_maxcmds;
1760 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1761 if (isp->isp_xflist == NULL) {
1762 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1766 #ifdef ISP_TARGET_MODE
1767 len = sizeof (void **) * isp->isp_maxcmds;
1768 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1769 if (isp->isp_tgtlist == NULL) {
1770 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
1775 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
1776 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
1777 if (pcs->dmaps == NULL) {
1778 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
1779 free(isp->isp_xflist, M_DEVBUF);
1780 #ifdef ISP_TARGET_MODE
1781 free(isp->isp_tgtlist, M_DEVBUF);
1788 * Allocate and map the request, result queues, plus FC scratch area.
1790 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1791 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1793 len += ISP2100_SCRLEN;
1796 ns = (len / PAGE_SIZE) + 1;
1798 * Create a tag for the control spaces- force it to within 32 bits.
1800 if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
1801 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1802 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
1803 isp_prt(isp, ISP_LOGERR,
1804 "cannot create a dma tag for control spaces");
1805 free(pcs->dmaps, M_DEVBUF);
1806 free(isp->isp_xflist, M_DEVBUF);
1807 #ifdef ISP_TARGET_MODE
1808 free(isp->isp_tgtlist, M_DEVBUF);
1814 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
1815 &isp->isp_cdmap) != 0) {
1816 isp_prt(isp, ISP_LOGERR,
1817 "cannot allocate %d bytes of CCB memory", len);
1818 bus_dma_tag_destroy(isp->isp_cdmat);
1819 free(isp->isp_xflist, M_DEVBUF);
1820 #ifdef ISP_TARGET_MODE
1821 free(isp->isp_tgtlist, M_DEVBUF);
1823 free(pcs->dmaps, M_DEVBUF);
1828 for (i = 0; i < isp->isp_maxcmds; i++) {
1829 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
1831 isp_prt(isp, ISP_LOGERR,
1832 "error %d creating per-cmd DMA maps", error);
1834 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
1842 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
1844 isp_prt(isp, ISP_LOGERR,
1845 "error %d loading dma map for control areas", im.error);
1849 isp->isp_rquest = base;
1850 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1851 isp->isp_result = base;
1853 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1854 FCPARAM(isp)->isp_scratch = base;
1860 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
1861 bus_dma_tag_destroy(isp->isp_cdmat);
1862 free(isp->isp_xflist, M_DEVBUF);
1863 #ifdef ISP_TARGET_MODE
1864 free(isp->isp_tgtlist, M_DEVBUF);
1866 free(pcs->dmaps, M_DEVBUF);
1868 isp->isp_rquest = NULL;
1881 #define MUSHERR_NOQENTRIES -2
1883 #ifdef ISP_TARGET_MODE
1885 * We need to handle DMA for target mode differently from initiator mode.
1887 * DMA mapping and construction and submission of CTIO Request Entries
1888 * and rendezvous for completion are very tightly coupled because we start
1889 * out by knowing (per platform) how much data we have to move, but we
1890 * don't know, up front, how many DMA mapping segments will have to be used
1891 * to cover that data, so we don't know how many CTIO Request Entries we
1892 * will end up using. Further, for performance reasons we may want to
1893 * (on the last CTIO for Fibre Channel), send status too (if all went well).
1895 * The standard vector still goes through isp_pci_dmasetup, but the callback
1896 * for the DMA mapping routines comes here instead with the whole transfer
1897 * mapped and a pointer to a partially filled in already allocated request
1898 * queue entry. We finish the job.
1900 static void tdma_mk(void *, bus_dma_segment_t *, int, int);
1901 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);
1903 #define STATUS_WITH_DATA 1
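/*
 * With STATUS_WITH_DATA defined, the final data CTIO also carries the SCSI
 * status; otherwise the status bits are stripped from the data CTIOs and a
 * separate status-only CTIO is synthesized at the end (see the
 * #ifndef STATUS_WITH_DATA handling below).
 */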
1906 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1909 struct ccb_scsiio *csio;
1911 struct isp_pcisoftc *pcs;
1913 ct_entry_t *cto, *qe;
1914 uint8_t scsi_status;
1915 uint32_t curi, nxti, handle;
1918 int nth_ctio, nctios, send_status;
1920 mp = (mush_t *) arg;
1927 csio = mp->cmd_token;
1929 curi = isp->isp_reqidx;
1930 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1933 cto->ct_seg_count = 0;
1934 cto->ct_header.rqs_entry_count = 1;
1935 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1938 cto->ct_header.rqs_seqno = 1;
1939 isp_prt(isp, ISP_LOGTDEBUG1,
1940 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
1941 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
1942 cto->ct_tag_val, cto->ct_flags, cto->ct_status,
1943 cto->ct_scsi_status, cto->ct_resid);
1944 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
1945 isp_put_ctio(isp, cto, qe);
1949 nctios = nseg / ISP_RQDSEG;
1950 if (nseg % ISP_RQDSEG) {
1955 * Save syshandle, and potentially any SCSI status, which we'll
1956 * reinsert on the last CTIO we're going to send.
1959 handle = cto->ct_syshandle;
1960 cto->ct_syshandle = 0;
1961 cto->ct_header.rqs_seqno = 0;
1962 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1965 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1966 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1968 * Preserve residual.
1970 resid = cto->ct_resid;
1973 * Save actual SCSI status.
1975 scsi_status = cto->ct_scsi_status;
1977 #ifndef STATUS_WITH_DATA
1978 sflags |= CT_NO_DATA;
1980 * We can't do a status at the same time as a data CTIO, so
1981 * we need to synthesize an extra CTIO at this level.
1986 sflags = scsi_status = resid = 0;
1990 cto->ct_scsi_status = 0;
1992 pcs = (struct isp_pcisoftc *)isp;
1993 dp = &pcs->dmaps[isp_handle_index(handle)];
1994 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1995 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1997 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2002 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
2009 if (seglim > ISP_RQDSEG)
2010 seglim = ISP_RQDSEG;
2012 for (seg = 0; seg < seglim; seg++, nseg--) {
2014 * Unlike normal initiator commands, we don't
2015 * do any swizzling here.
2017 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
2018 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
2019 cto->ct_xfrlen += dm_segs->ds_len;
2022 cto->ct_seg_count = seg;
2025 * This case should only happen when we're sending an
2026 * extra CTIO with final status.
2028 if (send_status == 0) {
2029 isp_prt(isp, ISP_LOGWARN,
2030 "tdma_mk ran out of segments");
2037 * At this point, the fields ct_lun, ct_iid, ct_tagval,
2038 * ct_tagtype, and ct_timeout have been carried over
2039 * unchanged from what our caller had set.
2041 * The dataseg fields and the seg_count fields we just got
2042 * through setting. The data direction we've preserved all
2043 * along and only clear it if we're now sending status.
2046 if (nth_ctio == nctios - 1) {
2048 * We're the last in a sequence of CTIOs, so mark
2049 * this CTIO and save the handle to the CCB such that
2050 * when this CTIO completes we can free dma resources
2051 * and do whatever else we need to do to finish the
2052 * rest of the command. We *don't* give this to the
2053 * firmware to work on- the caller will do that.
2056 cto->ct_syshandle = handle;
2057 cto->ct_header.rqs_seqno = 1;
2060 cto->ct_scsi_status = scsi_status;
2061 cto->ct_flags |= sflags;
2062 cto->ct_resid = resid;
2065 isp_prt(isp, ISP_LOGTDEBUG1,
2066 "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
2067 "scsi status %x resid %d",
2068 cto->ct_fwhandle, csio->ccb_h.target_lun,
2069 cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
2070 cto->ct_scsi_status, cto->ct_resid);
2072 isp_prt(isp, ISP_LOGTDEBUG1,
2073 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
2074 cto->ct_fwhandle, csio->ccb_h.target_lun,
2075 cto->ct_iid, cto->ct_tag_val,
2078 isp_put_ctio(isp, cto, qe);
2079 ISP_TDQE(isp, "last tdma_mk", curi, cto);
2081 MEMORYBARRIER(isp, SYNC_REQUEST,
2085 ct_entry_t *oqe = qe;
2088 * Make sure syshandle fields are clean
2090 cto->ct_syshandle = 0;
2091 cto->ct_header.rqs_seqno = 0;
2093 isp_prt(isp, ISP_LOGTDEBUG1,
2094 "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
2095 cto->ct_fwhandle, csio->ccb_h.target_lun,
2096 cto->ct_iid, cto->ct_flags);
2102 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2103 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
2104 if (nxti == mp->optr) {
2105 isp_prt(isp, ISP_LOGTDEBUG0,
2106 "Queue Overflow in tdma_mk");
2107 mp->error = MUSHERR_NOQENTRIES;
2112 * Now that we're done with the old CTIO,
2113 * flush it out to the request queue.
2115 ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
2116 isp_put_ctio(isp, cto, oqe);
2117 if (nth_ctio != 0) {
2118 MEMORYBARRIER(isp, SYNC_REQUEST, curi,
2121 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));
2124 * Reset some fields in the CTIO so we can reuse
2125 * for the next one we'll flush to the request
2128 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
2129 cto->ct_header.rqs_entry_count = 1;
2130 cto->ct_header.rqs_flags = 0;
2132 cto->ct_scsi_status = 0;
2135 cto->ct_seg_count = 0;
2136 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
2143 * We don't have to do multiple CTIOs here. Instead, we can just do
2144 * continuation segments as needed. This greatly simplifies the code and
2145 * improves performance.
2149 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2152 struct ccb_scsiio *csio;
2154 ct2_entry_t *cto, *qe;
2155 uint32_t curi, nxti;
2160 mp = (mush_t *) arg;
2167 csio = mp->cmd_token;
2170 curi = isp->isp_reqidx;
2171 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
2174 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
2175 isp_prt(isp, ISP_LOGWARN,
2176 "dma2_tgt_fc, a status CTIO2 without MODE1 "
2177 "set (0x%x)", cto->ct_flags);
2182 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
2183 * flags to NO DATA and clear relative offset flags.
2184 * We preserve the ct_resid and the response area.
2186 cto->ct_header.rqs_seqno = 1;
2187 cto->ct_seg_count = 0;
2189 isp_prt(isp, ISP_LOGTDEBUG1,
2190 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
2191 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
2192 cto->ct_iid, cto->ct_flags, cto->ct_status,
2193 cto->rsp.m1.ct_scsi_status, cto->ct_resid);
2194 if (FCPARAM(isp)->isp_2klogin) {
2196 (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
2198 isp_put_ctio2(isp, cto, qe);
2200 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
2204 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
2205 isp_prt(isp, ISP_LOGERR,
2206 "dma2_tgt_fc, a data CTIO2 without MODE0 set "
2207 "(0x%x)", cto->ct_flags);
2216 * Check to see if we need DAC addressing or not.
2218 * Any address that's over the 4GB boundary causes this
2222 if (sizeof (bus_addr_t) > 4) {
2223 for (segcnt = 0; segcnt < nseg; segcnt++) {
2224 uint64_t addr = dm_segs[segcnt].ds_addr;
2225 if (addr >= 0x100000000LL) {
2230 if (segcnt != nseg) {
2231 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
2232 seglim = ISP_RQDSEG_T3;
2233 ds64 = &cto->rsp.m0.u.ct_dataseg64[0];
2236 seglim = ISP_RQDSEG_T2;
2238 ds = &cto->rsp.m0.u.ct_dataseg[0];
2240 cto->ct_seg_count = 0;
2243 * Set up the CTIO2 data segments.
2245 for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
2246 cto->ct_seg_count++, segcnt++) {
2249 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
2250 ds64->ds_base = dm_segs[segcnt].ds_addr;
2251 ds64->ds_count = dm_segs[segcnt].ds_len;
2254 ds->ds_base = dm_segs[segcnt].ds_addr;
2255 ds->ds_count = dm_segs[segcnt].ds_len;
2258 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
2259 #if __FreeBSD_version < 500000
2260 isp_prt(isp, ISP_LOGTDEBUG1,
2261 "isp_send_ctio2: ent0[%d]0x%llx:%llu",
2262 cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
2263 (uint64_t)dm_segs[segcnt].ds_len);
2265 isp_prt(isp, ISP_LOGTDEBUG1,
2266 "isp_send_ctio2: ent0[%d]0x%jx:%ju",
2267 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
2268 (uintmax_t)dm_segs[segcnt].ds_len);
2272 while (segcnt < nseg) {
2275 ispcontreq_t local, *crq = &local, *qep;
2277 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2279 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
2280 if (nxti == mp->optr) {
2282 isp_prt(isp, ISP_LOGTDEBUG0,
2283 "tdma_mkfc: request queue overflow");
2284 mp->error = MUSHERR_NOQENTRIES;
2287 cto->ct_header.rqs_entry_count++;
2288 MEMZERO((void *)crq, sizeof (*crq));
2289 crq->req_header.rqs_entry_count = 1;
2290 if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
2291 seglim = ISP_CDSEG64;
2293 ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
2294 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2297 ds = &crq->req_dataseg[0];
2299 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
2301 for (seg = 0; segcnt < nseg && seg < seglim;
2305 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
2306 ds64->ds_base = dm_segs[segcnt].ds_addr;
2307 ds64->ds_count = dm_segs[segcnt].ds_len;
2310 ds->ds_base = dm_segs[segcnt].ds_addr;
2311 ds->ds_count = dm_segs[segcnt].ds_len;
2314 #if __FreeBSD_version < 500000
2315 isp_prt(isp, ISP_LOGTDEBUG1,
2316 "isp_send_ctio2: ent%d[%d]%llx:%llu",
2317 cto->ct_header.rqs_entry_count-1, seg,
2318 (uint64_t)dm_segs[segcnt].ds_addr,
2319 (uint64_t)dm_segs[segcnt].ds_len);
2321 isp_prt(isp, ISP_LOGTDEBUG1,
2322 "isp_send_ctio2: ent%d[%d]%jx:%ju",
2323 cto->ct_header.rqs_entry_count-1, seg,
2324 (uintmax_t)dm_segs[segcnt].ds_addr,
2325 (uintmax_t)dm_segs[segcnt].ds_len);
2327 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
2328 cto->ct_seg_count++;
2330 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
2331 isp_put_cont_req(isp, crq, qep);
2332 ISP_TDQE(isp, "cont entry", curi, qep);
2336 * Now do final twiddling for the CTIO itself.
2338 cto->ct_header.rqs_seqno = 1;
2339 isp_prt(isp, ISP_LOGTDEBUG1,
2340 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
2341 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
2342 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
2344 if (FCPARAM(isp)->isp_2klogin) {
2345 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
2347 isp_put_ctio2(isp, cto, qe);
2349 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
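/*
 * Initiator-mode DMA callbacks: dma_2400 fills in type-7 (24XX) request
 * entries, dma2_a64 builds 64-bit (type-3/A64) entries for configurations
 * using 64-bit DMA addressing, and dma2 handles the plain 32-bit case.
 * Each walks the segment list into the request entry and then spills any
 * remainder into continuation entries on the request queue.
 */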
2354 static void dma_2400(void *, bus_dma_segment_t *, int, int);
2355 static void dma2_a64(void *, bus_dma_segment_t *, int, int);
2356 static void dma2(void *, bus_dma_segment_t *, int, int);
2359 dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2363 struct ccb_scsiio *csio;
2364 struct isp_pcisoftc *pcs;
2366 bus_dma_segment_t *eseg;
2368 int seglim, datalen;
2371 mp = (mush_t *) arg;
2378 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2383 csio = mp->cmd_token;
2386 pcs = (struct isp_pcisoftc *)mp->isp;
2387 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2390 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2391 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2393 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2395 datalen = XS_XFRLEN(csio);
2398 * We're passed an initial partially filled in entry that
2399 * has most fields filled in except for data transfer
2402 * Our job is to fill in the initial request queue entry and
2403 * then to start allocating and filling in continuation entries
2404 * until we've covered the entire transfer.
2407 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS;
2408 rq->req_dl = datalen;
2409 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2410 rq->req_alen_datadir = 0x2;
2412 rq->req_alen_datadir = 0x1;
2415 eseg = dm_segs + nseg;
2417 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr);
2418 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr);
2419 rq->req_dataseg.ds_count = dm_segs->ds_len;
2421 datalen -= dm_segs->ds_len;
2424 rq->req_seg_count++;
2426 while (datalen > 0 && dm_segs != eseg) {
2428 ispcontreq64_t local, *crq = &local, *cqe;
2430 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2432 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2433 if (nxti == mp->optr) {
2434 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2435 mp->error = MUSHERR_NOQENTRIES;
2438 rq->req_header.rqs_entry_count++;
2439 MEMZERO((void *)crq, sizeof (*crq));
2440 crq->req_header.rqs_entry_count = 1;
2441 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2444 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
2445 crq->req_dataseg[seglim].ds_base =
2446 DMA_LO32(dm_segs->ds_addr);
2447 crq->req_dataseg[seglim].ds_basehi =
2448 DMA_HI32(dm_segs->ds_addr);
2449 crq->req_dataseg[seglim].ds_count =
2451 rq->req_seg_count++;
2454 datalen -= dm_segs->ds_len;
2456 if (isp->isp_dblev & ISP_LOGDEBUG1) {
2457 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
2459 isp_put_cont64_req(isp, crq, cqe);
2460 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
static void
dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	uint32_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = (ispreq64_t *) mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * directions, total data length and SG entry count.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
	if (IS_FC(isp)) {
		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		rq->req_header.rqs_entry_type = RQSTYPE_A64;
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    DMA_HI32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
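/*
 * dma2: the 32-bit address flavor of the same walk -- only the low 32
 * bits of each segment address are used and spillover segments go into
 * RQSTYPE_DATASEG continuation entries.
 */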
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	uint32_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * directions, total data length and SG entry count.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		uint32_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    DMA_LO32(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		if (isp->isp_dblev & ISP_LOGDEBUG1) {
			isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq);
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
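/*
 * isp_pci_dmasetup maps the data for one SCSI CCB, fills the request
 * queue entry (plus any continuation entries) and copies it into the
 * shared request queue slot.  A rough sketch of the calling contract as
 * the code below implements it -- the caller shown is illustrative only,
 * not a copy of the core driver:
 *
 *	uint32_t nxti = isp->isp_reqidx;	(next free queue index)
 *
 *	switch (isp_pci_dmasetup(isp, csio, rq, &nxti, optr)) {
 *	case CMD_QUEUED:	entry copied; nxti is the new next index
 *	case CMD_EAGAIN:	no request queue entries left, retry later
 *	case CMD_COMPLETE:	mapping failed, CCB error already set
 *	}
 */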
/*
 * We enter with ISP_LOCK held
 */
static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    uint32_t *nxtip, uint32_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			ISPLOCK_2_CAMLOCK(isp);
			(*eptr)(mp, NULL, 0, 0);
			CAMLOCK_2_ISPLOCK(isp);
			goto mbxsync;
		}
	} else
#endif
	if (IS_24XX(isp)) {
		eptr = dma_2400;
	} else if (sizeof (bus_addr_t) > 4) {
		eptr = dma2_a64;
	} else {
		eptr = dma2;
	}

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
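	/*
	 * In other words, the mush structure carries everything the
	 * dma2()/dma2_a64()/dma_2400() callback will need -- softc, CCB,
	 * request entry, pointer to the next queue index and the consumer
	 * index -- and the callback reports failures through mp->error,
	 * which is checked after bus_dmamap_load() returns.
	 */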
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	ISPLOCK_2_CAMLOCK(isp);
	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
				mp->error = error;
			}
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	CAMLOCK_2_ISPLOCK(isp);
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	if (isp->isp_dblev & ISP_LOGDEBUG1) {
		isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq);
	}
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	case RQSTYPE_T7RQS:
		isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}
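/*
 * isp_pci_dmateardown: undo the mapping made for a completed command --
 * sync the DMA map in the direction the data moved, then unload it.
 */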
static void
isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];

	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}
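/*
 * Chip reset hooks: isp_pci_reset0 runs before the core resets the chip
 * and simply masks interrupts; isp_pci_reset1 runs afterwards, disables
 * the chip BIOS on pre-24XX parts and re-enables interrupts.
 */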
static void
isp_pci_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}
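/*
 * Debug helper: dump the interesting BIU/RISC registers (pausing the RISC
 * around the DMA and SXP reads on the parallel SCSI chips), the outgoing
 * mailbox registers and the PCI command/status word.
 */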
static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;

	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));