2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2009-2018 Alexander Motin <mav@FreeBSD.org>
5 * Copyright (c) 1997-2008 by Matthew Jacob
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/linker.h>
41 #include <sys/firmware.h>
43 #include <sys/stdint.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <machine/bus.h>
47 #include <machine/resource.h>
49 #include <sys/malloc.h>
51 #include <dev/isp/isp_freebsd.h>
53 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
54 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
55 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
56 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
57 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
58 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
59 static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int);
60 static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
61 static void isp_pci_run_isr(ispsoftc_t *);
62 static void isp_pci_run_isr_2300(ispsoftc_t *);
63 static void isp_pci_run_isr_2400(ispsoftc_t *);
64 static int isp_pci_mbxdma(ispsoftc_t *);
65 static void isp_pci_mbxdmafree(ispsoftc_t *);
66 static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
67 static int isp_pci_irqsetup(ispsoftc_t *);
68 static void isp_pci_dumpregs(ispsoftc_t *, const char *);
/*
 * Per-chip method-dispatch tables (struct ispmdvec): one table per adapter
 * family, selected in isp_pci_attach() by PCI device id.  Each table wires
 * the family's register accessors, ISR runner and DMA helpers into the
 * common core.
 *
 * NOTE(review): this listing is truncated — most initializer fields are
 * missing from view; only the shared dmateardown entry, the ISR entries for
 * 23xx/24xx+, and the BIU config word are visible.
 */
70 static struct ispmdvec mdvec = {
76 isp_common_dmateardown,
80 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
/* 1080/1240/1280 parallel SCSI family. */
83 static struct ispmdvec mdvec_1080 = {
89 isp_common_dmateardown,
93 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
/* 10160/12160 Ultra3 SCSI family. */
96 static struct ispmdvec mdvec_12160 = {
102 isp_common_dmateardown,
106 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
/* First-generation Fibre Channel (2100/2200): no BIU config word entry. */
109 static struct ispmdvec mdvec_2100 = {
115 isp_common_dmateardown,
120 static struct ispmdvec mdvec_2200 = {
126 isp_common_dmateardown,
/* 23xx FC family uses its own RISC2HOST-status ISR. */
131 static struct ispmdvec mdvec_2300 = {
132 isp_pci_run_isr_2300,
137 isp_common_dmateardown,
/* 24xx and later all share the 2400-style ISR. */
142 static struct ispmdvec mdvec_2400 = {
143 isp_pci_run_isr_2400,
148 isp_common_dmateardown,
153 static struct ispmdvec mdvec_2500 = {
154 isp_pci_run_isr_2400,
159 isp_common_dmateardown,
164 static struct ispmdvec mdvec_2600 = {
165 isp_pci_run_isr_2400,
170 isp_common_dmateardown,
175 static struct ispmdvec mdvec_2700 = {
176 isp_pci_run_isr_2400,
180 isp_common_dmateardown,
186 #ifndef PCIM_CMD_INVEN
187 #define PCIM_CMD_INVEN 0x10
189 #ifndef PCIM_CMD_BUSMASTEREN
190 #define PCIM_CMD_BUSMASTEREN 0x0004
192 #ifndef PCIM_CMD_PERRESPEN
193 #define PCIM_CMD_PERRESPEN 0x0040
195 #ifndef PCIM_CMD_SEREN
196 #define PCIM_CMD_SEREN 0x0100
198 #ifndef PCIM_CMD_INTX_DISABLE
199 #define PCIM_CMD_INTX_DISABLE 0x0400
203 #define PCIR_COMMAND 0x04
206 #ifndef PCIR_CACHELNSZ
207 #define PCIR_CACHELNSZ 0x0c
210 #ifndef PCIR_LATTIMER
211 #define PCIR_LATTIMER 0x0d
215 #define PCIR_ROMADDR 0x30
218 #define PCI_VENDOR_QLOGIC 0x1077
220 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
221 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
222 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
223 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
224 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
225 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
227 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
228 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
229 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
230 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
231 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
232 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
233 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432
234 #define PCI_PRODUCT_QLOGIC_ISP2532 0x2532
235 #define PCI_PRODUCT_QLOGIC_ISP5432 0x5432
236 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
237 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
238 #define PCI_PRODUCT_QLOGIC_ISP2031 0x2031
239 #define PCI_PRODUCT_QLOGIC_ISP8031 0x8031
240 #define PCI_PRODUCT_QLOGIC_ISP2684 0x2171
241 #define PCI_PRODUCT_QLOGIC_ISP2692 0x2b61
242 #define PCI_PRODUCT_QLOGIC_ISP2714 0x2071
243 #define PCI_PRODUCT_QLOGIC_ISP2722 0x2261
245 #define PCI_QLOGIC_ISP1020 \
246 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
247 #define PCI_QLOGIC_ISP1080 \
248 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
249 #define PCI_QLOGIC_ISP10160 \
250 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
251 #define PCI_QLOGIC_ISP12160 \
252 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
253 #define PCI_QLOGIC_ISP1240 \
254 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
255 #define PCI_QLOGIC_ISP1280 \
256 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
258 #define PCI_QLOGIC_ISP2100 \
259 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
260 #define PCI_QLOGIC_ISP2200 \
261 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
262 #define PCI_QLOGIC_ISP2300 \
263 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
264 #define PCI_QLOGIC_ISP2312 \
265 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
266 #define PCI_QLOGIC_ISP2322 \
267 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
268 #define PCI_QLOGIC_ISP2422 \
269 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
270 #define PCI_QLOGIC_ISP2432 \
271 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
272 #define PCI_QLOGIC_ISP2532 \
273 ((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)
274 #define PCI_QLOGIC_ISP5432 \
275 ((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)
276 #define PCI_QLOGIC_ISP6312 \
277 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
278 #define PCI_QLOGIC_ISP6322 \
279 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
280 #define PCI_QLOGIC_ISP2031 \
281 ((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)
282 #define PCI_QLOGIC_ISP8031 \
283 ((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC)
284 #define PCI_QLOGIC_ISP2684 \
285 ((PCI_PRODUCT_QLOGIC_ISP2684 << 16) | PCI_VENDOR_QLOGIC)
286 #define PCI_QLOGIC_ISP2692 \
287 ((PCI_PRODUCT_QLOGIC_ISP2692 << 16) | PCI_VENDOR_QLOGIC)
288 #define PCI_QLOGIC_ISP2714 \
289 ((PCI_PRODUCT_QLOGIC_ISP2714 << 16) | PCI_VENDOR_QLOGIC)
290 #define PCI_QLOGIC_ISP2722 \
291 ((PCI_PRODUCT_QLOGIC_ISP2722 << 16) | PCI_VENDOR_QLOGIC)
294 * Odd case for some AMI raid cards... We need to *not* attach to this.
296 #define AMI_RAID_SUBVENDOR_ID 0x101e
298 #define PCI_DFLT_LTNCY 0x40
299 #define PCI_DFLT_LNSZ 0x10
301 static int isp_pci_probe (device_t);
302 static int isp_pci_attach (device_t);
303 static int isp_pci_detach (device_t);
306 #define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev
307 struct isp_pcisoftc {
310 struct resource * regs;
311 struct resource * regs1;
312 struct resource * regs2;
315 struct resource * irq;
324 int16_t pci_poff[_NREG_BLKS];
/*
 * Newbus glue: device methods, driver declaration and module registration.
 * The driver attaches under the "pci" bus, declares its softc size, and
 * depends on cam(4) and firmware(9).
 */
330 static device_method_t isp_pci_methods[] = {
331 /* Device interface */
332 DEVMETHOD(device_probe, isp_pci_probe),
333 DEVMETHOD(device_attach, isp_pci_attach),
334 DEVMETHOD(device_detach, isp_pci_detach),
338 static driver_t isp_pci_driver = {
339 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
341 static devclass_t isp_devclass;
342 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
343 MODULE_DEPEND(isp, cam, 1, 1, 1);
344 MODULE_DEPEND(isp, firmware, 1, 1, 1);
/* Number of virtual FC ports requested via the "vports" hint (see
 * isp_get_generic_options()); added to isp_nchan for 24xx+ chips. */
345 static int isp_nvports = 0;
348 isp_pci_probe(device_t dev)
350 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
351 case PCI_QLOGIC_ISP1020:
352 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
354 case PCI_QLOGIC_ISP1080:
355 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
357 case PCI_QLOGIC_ISP1240:
358 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
360 case PCI_QLOGIC_ISP1280:
361 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
363 case PCI_QLOGIC_ISP10160:
364 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
366 case PCI_QLOGIC_ISP12160:
367 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
370 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
372 case PCI_QLOGIC_ISP2100:
373 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
375 case PCI_QLOGIC_ISP2200:
376 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
378 case PCI_QLOGIC_ISP2300:
379 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
381 case PCI_QLOGIC_ISP2312:
382 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
384 case PCI_QLOGIC_ISP2322:
385 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
387 case PCI_QLOGIC_ISP2422:
388 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
390 case PCI_QLOGIC_ISP2432:
391 device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
393 case PCI_QLOGIC_ISP2532:
394 device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
396 case PCI_QLOGIC_ISP5432:
397 device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
399 case PCI_QLOGIC_ISP6312:
400 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
402 case PCI_QLOGIC_ISP6322:
403 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
405 case PCI_QLOGIC_ISP2031:
406 device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
408 case PCI_QLOGIC_ISP8031:
409 device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter");
411 case PCI_QLOGIC_ISP2684:
412 device_set_desc(dev, "Qlogic ISP 2684 PCI FC Adapter");
414 case PCI_QLOGIC_ISP2692:
415 device_set_desc(dev, "Qlogic ISP 2692 PCI FC Adapter");
417 case PCI_QLOGIC_ISP2714:
418 device_set_desc(dev, "Qlogic ISP 2714 PCI FC Adapter");
420 case PCI_QLOGIC_ISP2722:
421 device_set_desc(dev, "Qlogic ISP 2722 PCI FC Adapter");
426 if (isp_announced == 0 && bootverbose) {
427 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
428 "Core Version %d.%d\n",
429 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
430 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
434 * XXXX: Here is where we might load the f/w module
435 * XXXX: (or increase a reference count to it).
437 return (BUS_PROBE_DEFAULT);
/*
 * Read device-wide hints (hint.isp.N.*) and fold them into the softc:
 * "fwload_disable", "ignore_nvram", "debug", "vports" and "quickboot_time".
 *
 * NOTE(review): listing is truncated — the declaration of tval, the braces
 * around the if/else bodies, and the assignment guarded by the vports range
 * check are not visible here.
 */
441 isp_get_generic_options(device_t dev, ispsoftc_t *isp)
/* "fwload_disable" != 0 -> never (re)load firmware from ispfw(4). */
446 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
447 isp->isp_confopts |= ISP_CFG_NORELOAD;
/* "ignore_nvram" != 0 -> don't trust adapter NVRAM settings. */
450 if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
451 isp->isp_confopts |= ISP_CFG_NONVRAM;
/* Debug level: hint value if given, else warnings+errors; bootverbose
 * presumably adds config/info logging (guard not visible here). */
454 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
456 isp->isp_dblev = tval;
458 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
461 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
/* Number of NPIV virtual ports to create (1..254). */
464 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
465 if (tval > 0 && tval <= 254) {
/* Boot-time loop-settle wait override. */
469 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
470 isp_quickboot_time = tval;
/*
 * Read per-channel hints (hint.isp.N.chanC.*) for channel `chan` and apply
 * them: initiator/loop id, role, full-duplex, FC topology, FC-Tape,
 * port/node WWNs, loop-down limit and gone-device time.
 *
 * NOTE(review): listing is truncated — tval/sptr/eptr declarations, several
 * if/else scaffolds and IS_FC()/IS_SCSI() guards are not visible here.
 */
474 isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
478 char prefix[12], name[16];
/* All hints for this channel are named "chan<chan>.<option>". */
483 snprintf(prefix, sizeof(prefix), "chan%d.", chan);
/* Initiator id: defaults differ (FC loop id vs SPI id 7); an explicit
 * hint also sets ISP_CFG_OWNLOOPID. */
484 snprintf(name, sizeof(name), "%siid", prefix);
485 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
488 ISP_FC_PC(isp, chan)->default_id = 109 - chan;
490 ISP_SPI_PC(isp, chan)->iid = 7;
494 ISP_FC_PC(isp, chan)->default_id = tval - chan;
496 ISP_SPI_PC(isp, chan)->iid = tval;
498 isp->isp_confopts |= ISP_CFG_OWNLOOPID;
/* Role hint: only initiator/target values accepted, else default roles. */
505 snprintf(name, sizeof(name), "%srole", prefix);
506 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
510 case ISP_ROLE_INITIATOR:
511 case ISP_ROLE_TARGET:
513 device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
521 tval = ISP_DEFAULT_ROLES;
523 ISP_FC_PC(isp, chan)->def_role = tval;
526 snprintf(name, sizeof(name), "%sfullduplex", prefix);
527 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
528 name, &tval) == 0 && tval != 0) {
529 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
/* Topology hint is a string: lport/nport, optionally "-only". */
532 snprintf(name, sizeof(name), "%stopology", prefix);
533 if (resource_string_value(device_get_name(dev), device_get_unit(dev),
534 name, (const char **) &sptr) == 0 && sptr != NULL) {
535 if (strcmp(sptr, "lport") == 0) {
536 isp->isp_confopts |= ISP_CFG_LPORT;
537 } else if (strcmp(sptr, "nport") == 0) {
538 isp->isp_confopts |= ISP_CFG_NPORT;
539 } else if (strcmp(sptr, "lport-only") == 0) {
540 isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
541 } else if (strcmp(sptr, "nport-only") == 0) {
542 isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
/* Compile-time FC-Tape default, overridable by the hints below. */
546 #ifdef ISP_FCTAPE_OFF
547 isp->isp_confopts |= ISP_CFG_NOFCTAPE;
549 isp->isp_confopts |= ISP_CFG_FCTAPE;
553 snprintf(name, sizeof(name), "%snofctape", prefix);
554 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
557 isp->isp_confopts &= ~ISP_CFG_FCTAPE;
558 isp->isp_confopts |= ISP_CFG_NOFCTAPE;
562 snprintf(name, sizeof(name), "%sfctape", prefix);
563 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
566 isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
567 isp->isp_confopts |= ISP_CFG_FCTAPE;
/*
 * Because the resource_*_value functions can neither return
 * 64 bit integer values, nor can they be directly coerced
 * to interpret the right hand side of the assignment as
 * you want them to interpret it, we have to force WWN
 * hint replacement to specify WWN strings with a leading
 * 'w' (e.g. w50000000aaaa0001). Sigh.
 */
580 snprintf(name, sizeof(name), "%sportwwn", prefix);
581 tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
582 name, (const char **) &sptr);
583 if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
585 ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
/* Reject strings shorter than 16 hex digits or parse overflow. */
586 if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
587 device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
588 ISP_FC_PC(isp, chan)->def_wwpn = 0;
593 snprintf(name, sizeof(name), "%snodewwn", prefix);
594 tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
595 name, (const char **) &sptr);
596 if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
598 ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
599 if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
600 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
601 ISP_FC_PC(isp, chan)->def_wwnn = 0;
/* Loop-down limit: per-channel override of the global default,
 * clamped to a sane 16-bit range. */
606 snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
607 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
609 if (tval >= 0 && tval < 0xffff) {
610 ISP_FC_PC(isp, chan)->loop_down_limit = tval;
612 ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
616 snprintf(name, sizeof(name), "%sgone_device_time", prefix);
617 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
619 if (tval >= 0 && tval < 0xffff) {
620 ISP_FC_PC(isp, chan)->gone_device_time = tval;
622 ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
/*
 * Attach method.  Identifies the chip variant, maps register BARs,
 * allocates per-channel parameter/softc storage, loads firmware via
 * firmware(9), fixes up PCI config space (command bits, cache line size,
 * latency timer, expansion ROM), resets the chip and hands off to the
 * common isp_attach().  The tail is the shared error-unwind path, also
 * reached from the detach logic.
 *
 * NOTE(review): this listing is heavily truncated — braces, `break;`s,
 * error gotos and several declarations are missing from view.  Code left
 * byte-identical; only comments added.
 */
627 isp_pci_attach(device_t dev)
629 struct isp_pcisoftc *pcs = device_get_softc(dev);
630 ispsoftc_t *isp = &pcs->pci_isp;
632 uint32_t data, cmd, linesz, did;
639 mtx_init(&isp->isp_lock, "isp", NULL, MTX_DEF);
/*
 * Get Generic Options
 */
645 isp_get_generic_options(dev, isp);
647 linesz = PCI_DFLT_LNSZ;
648 pcs->regs = pcs->regs2 = NULL;
649 pcs->rgd = pcs->rtp = 0;
/* Default register-block offsets; per-chip cases below override some. */
652 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
653 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
654 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
655 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
656 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
/* Select mdvec table, HA type and register offsets by device id. */
658 switch (pci_get_devid(dev)) {
659 case PCI_QLOGIC_ISP1020:
661 isp->isp_mdvec = &mdvec;
662 isp->isp_type = ISP_HA_SCSI_UNKNOWN;
664 case PCI_QLOGIC_ISP1080:
666 isp->isp_mdvec = &mdvec_1080;
667 isp->isp_type = ISP_HA_SCSI_1080;
668 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
670 case PCI_QLOGIC_ISP1240:
672 isp->isp_mdvec = &mdvec_1080;
673 isp->isp_type = ISP_HA_SCSI_1240;
675 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
677 case PCI_QLOGIC_ISP1280:
679 isp->isp_mdvec = &mdvec_1080;
680 isp->isp_type = ISP_HA_SCSI_1280;
681 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
683 case PCI_QLOGIC_ISP10160:
685 isp->isp_mdvec = &mdvec_12160;
686 isp->isp_type = ISP_HA_SCSI_10160;
687 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
689 case PCI_QLOGIC_ISP12160:
692 isp->isp_mdvec = &mdvec_12160;
693 isp->isp_type = ISP_HA_SCSI_12160;
694 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
696 case PCI_QLOGIC_ISP2100:
698 isp->isp_mdvec = &mdvec_2100;
699 isp->isp_type = ISP_HA_FC_2100;
700 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
701 if (pci_get_revid(dev) < 3) {
/*
 * XXX: Need to get the actual revision
 * XXX: number of the 2100 FB. At any rate,
 * XXX: lower cache line size for early revision
 */
711 case PCI_QLOGIC_ISP2200:
713 isp->isp_mdvec = &mdvec_2200;
714 isp->isp_type = ISP_HA_FC_2200;
715 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
717 case PCI_QLOGIC_ISP2300:
719 isp->isp_mdvec = &mdvec_2300;
720 isp->isp_type = ISP_HA_FC_2300;
721 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
723 case PCI_QLOGIC_ISP2312:
724 case PCI_QLOGIC_ISP6312:
726 isp->isp_mdvec = &mdvec_2300;
727 isp->isp_type = ISP_HA_FC_2312;
728 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
730 case PCI_QLOGIC_ISP2322:
731 case PCI_QLOGIC_ISP6322:
733 isp->isp_mdvec = &mdvec_2300;
734 isp->isp_type = ISP_HA_FC_2322;
735 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
737 case PCI_QLOGIC_ISP2422:
738 case PCI_QLOGIC_ISP2432:
/* 24xx+ parts support NPIV: add requested virtual ports. */
740 isp->isp_nchan += isp_nvports;
741 isp->isp_mdvec = &mdvec_2400;
742 isp->isp_type = ISP_HA_FC_2400;
743 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
745 case PCI_QLOGIC_ISP2532:
747 isp->isp_nchan += isp_nvports;
748 isp->isp_mdvec = &mdvec_2500;
749 isp->isp_type = ISP_HA_FC_2500;
750 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
752 case PCI_QLOGIC_ISP5432:
754 isp->isp_mdvec = &mdvec_2500;
755 isp->isp_type = ISP_HA_FC_2500;
756 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
758 case PCI_QLOGIC_ISP2031:
759 case PCI_QLOGIC_ISP8031:
761 isp->isp_nchan += isp_nvports;
762 isp->isp_mdvec = &mdvec_2600;
763 isp->isp_type = ISP_HA_FC_2600;
764 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
766 case PCI_QLOGIC_ISP2684:
767 case PCI_QLOGIC_ISP2692:
768 case PCI_QLOGIC_ISP2714:
769 case PCI_QLOGIC_ISP2722:
771 isp->isp_nchan += isp_nvports;
772 isp->isp_mdvec = &mdvec_2700;
773 isp->isp_type = ISP_HA_FC_2700;
774 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
777 device_printf(dev, "unknown device type\n");
781 isp->isp_revision = pci_get_revid(dev);
/* Map register BARs; newer chips presumably use memory BARs 0/2/4
 * (guarding conditionals not visible in this listing). */
784 pcs->rtp = SYS_RES_MEMORY;
785 pcs->rgd = PCIR_BAR(0);
786 pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
788 pcs->rtp1 = SYS_RES_MEMORY;
789 pcs->rgd1 = PCIR_BAR(2);
790 pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1,
792 pcs->rtp2 = SYS_RES_MEMORY;
793 pcs->rgd2 = PCIR_BAR(4);
794 pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2,
/* Older chips: try memory BAR 1 first, fall back to I/O BAR 0. */
797 pcs->rtp = SYS_RES_MEMORY;
798 pcs->rgd = PCIR_BAR(1);
799 pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
801 if (pcs->regs == NULL) {
802 pcs->rtp = SYS_RES_IOPORT;
803 pcs->rgd = PCIR_BAR(0);
804 pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
805 &pcs->rgd, RF_ACTIVE);
808 if (pcs->regs == NULL) {
809 device_printf(dev, "Unable to map any ports\n");
813 device_printf(dev, "Using %s space register mapping\n",
814 (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
816 isp->isp_regs = pcs->regs;
817 isp->isp_regs2 = pcs->regs2;
/* Per-channel parameter and platform-specific storage (FC vs SPI). */
820 psize = sizeof (fcparam);
821 xsize = sizeof (struct isp_fc);
823 psize = sizeof (sdparam);
824 xsize = sizeof (struct isp_spi);
826 psize *= isp->isp_nchan;
827 xsize *= isp->isp_nchan;
828 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
829 if (isp->isp_param == NULL) {
830 device_printf(dev, "cannot allocate parameter data\n");
833 isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
834 if (isp->isp_osinfo.pc.ptr == NULL) {
835 device_printf(dev, "cannot allocate parameter data\n");
/*
 * Now that we know who we are (roughly) get/set specific options
 */
842 for (i = 0; i < isp->isp_nchan; i++) {
843 isp_get_specific_options(dev, i, isp);
/* Fetch firmware image "isp_<devid>" via firmware(9), if present. */
846 isp->isp_osinfo.fw = NULL;
847 if (isp->isp_osinfo.fw == NULL) {
848 snprintf(fwname, sizeof (fwname), "isp_%04x", did);
849 isp->isp_osinfo.fw = firmware_get(fwname);
851 if (isp->isp_osinfo.fw != NULL) {
852 isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
853 isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
/*
 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
 */
859 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
860 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
861 if (IS_2300(isp)) {	/* per QLogic errata */
862 cmd &= ~PCIM_CMD_INVEN;
864 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
865 cmd &= ~PCIM_CMD_INTX_DISABLE;
868 cmd &= ~PCIM_CMD_INTX_DISABLE;
870 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
/*
 * Make sure the Cache Line Size register is set sensibly.
 */
875 data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
876 if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
877 isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
879 pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
/*
 * Make sure the Latency Timer is sane.
 */
885 data = pci_read_config(dev, PCIR_LATTIMER, 1);
886 if (data < PCI_DFLT_LTNCY) {
887 data = PCI_DFLT_LTNCY;
888 isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
889 pci_write_config(dev, PCIR_LATTIMER, data, 1);
/*
 * Make sure we've disabled the ROM.
 */
895 data = pci_read_config(dev, PCIR_ROMADDR, 4);
897 pci_write_config(dev, PCIR_ROMADDR, data, 4);
/*
 * Last minute checks...
 */
902 if (IS_23XX(isp) || IS_24XX(isp)) {
903 isp->isp_port = pci_get_function(dev);
/*
 * Make sure we're in reset state.
 */
910 if (isp_reinit(isp, 1) != 0) {
915 if (isp_attach(isp)) {
/* Error unwind: hint at missing ispfw(4) for pre-16Gb adapters. */
924 if (isp->isp_osinfo.fw == NULL && !IS_26XX(isp)) {
/*
 * Failure to attach at boot time might have been caused
 * by a missing ispfw(4). 16Gb and newer adapters are the
 * exception: there is no loadable firmware for them.
 */
930 isp_prt(isp, ISP_LOGWARN, "See the ispfw(4) man page on "
931 "how to load known good firmware at boot time");
/* Tear down interrupts, MSI, BARs and allocations in reverse order. */
933 for (i = 0; i < isp->isp_nirq; i++) {
934 (void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
935 (void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
939 pci_release_msi(dev);
942 (void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
944 (void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
946 (void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
947 if (pcs->pci_isp.isp_param) {
948 free(pcs->pci_isp.isp_param, M_DEVBUF);
949 pcs->pci_isp.isp_param = NULL;
951 if (pcs->pci_isp.isp_osinfo.pc.ptr) {
952 free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
953 pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
955 mtx_destroy(&isp->isp_lock);
/*
 * Detach method: shut down the core via isp_detach(), then release IRQs,
 * MSI vectors, register BARs, DMA areas and per-channel allocations, and
 * destroy the softc lock.  Mirrors the attach error-unwind path.
 *
 * NOTE(review): listing is truncated — the status check after isp_detach()
 * and several closing braces are not visible here.
 */
960 isp_pci_detach(device_t dev)
962 struct isp_pcisoftc *pcs = device_get_softc(dev);
963 ispsoftc_t *isp = &pcs->pci_isp;
966 status = isp_detach(isp);
/* Release every established interrupt and its resource. */
972 for (i = 0; i < isp->isp_nirq; i++) {
973 (void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
974 (void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
978 pci_release_msi(dev);
979 (void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
981 (void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
983 (void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
984 isp_pci_mbxdmafree(isp);
985 if (pcs->pci_isp.isp_param) {
986 free(pcs->pci_isp.isp_param, M_DEVBUF);
987 pcs->pci_isp.isp_param = NULL;
989 if (pcs->pci_isp.isp_osinfo.pc.ptr) {
990 free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
991 pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
993 mtx_destroy(&isp->isp_lock);
997 #define IspVirt2Off(a, x) \
998 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
999 _BLK_REG_SHFT] + ((x) & 0xfff))
1001 #define BXR2(isp, off) bus_read_2((isp)->isp_regs, (off))
1002 #define BXW2(isp, off, v) bus_write_2((isp)->isp_regs, (off), (v))
1003 #define BXR4(isp, off) bus_read_4((isp)->isp_regs, (off))
1004 #define BXW4(isp, off, v) bus_write_4((isp)->isp_regs, (off), (v))
1005 #define B2R4(isp, off) bus_read_4((isp)->isp_regs2, (off))
1006 #define B2W4(isp, off, v) bus_write_4((isp)->isp_regs2, (off), (v))
1008 static ISP_INLINE uint16_t
1009 isp_pci_rd_debounced(ispsoftc_t *isp, int off)
1013 val = BXR2(isp, IspVirt2Off(isp, off));
1016 val = BXR2(isp, IspVirt2Off(isp, off));
1017 } while (val != prev);
1022 isp_pci_run_isr(ispsoftc_t *isp)
1024 uint16_t isr, sema, info;
1027 isr = isp_pci_rd_debounced(isp, BIU_ISR);
1028 sema = isp_pci_rd_debounced(isp, BIU_SEMA);
1030 isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
1031 sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
1033 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1034 isr &= INT_PENDING_MASK(isp);
1035 sema &= BIU_SEMA_LOCK;
1036 if (isr == 0 && sema == 0)
1040 info = isp_pci_rd_debounced(isp, OUTMAILBOX0);
1042 info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
1043 if (info & MBOX_COMMAND_COMPLETE)
1044 isp_intr_mbox(isp, info);
1046 isp_intr_async(isp, info);
1047 if (!IS_FC(isp) && isp->isp_state == ISP_RUNSTATE)
1048 isp_intr_respq(isp);
1050 isp_intr_respq(isp);
1051 ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
1053 ISP_WRITE(isp, BIU_SEMA, 0);
/*
 * Interrupt service runner for 23xx FC chips.  Uses the RISC2HOST status
 * register: low bits select the interrupt kind, the high 16 bits carry the
 * mailbox/event payload.  Ends by clearing the RISC interrupt and the
 * semaphore.
 *
 * NOTE(review): listing is truncated — the switch statement, per-case
 * `break;`s, early returns and isr/info declarations are not visible here.
 */
1057 isp_pci_run_isr_2300(ispsoftc_t *isp)
1059 uint32_t hccr, r2hisr;
/* Nothing to do if the RISC interrupt bit isn't set. */
1062 if ((BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0)
1064 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
1065 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1066 if ((r2hisr & BIU_R2HST_INTR) == 0)
1068 isr = r2hisr & BIU_R2HST_ISTAT_MASK;
1069 info = r2hisr >> 16;
/* Mailbox completions (ROM and runtime, OK and FAIL). */
1071 case ISPR2HST_ROM_MBX_OK:
1072 case ISPR2HST_ROM_MBX_FAIL:
1073 case ISPR2HST_MBX_OK:
1074 case ISPR2HST_MBX_FAIL:
1075 isp_intr_mbox(isp, info);
1077 case ISPR2HST_ASYNC_EVENT:
1078 isp_intr_async(isp, info);
1080 case ISPR2HST_RIO_16:
1081 isp_intr_async(isp, ASYNC_RIO16_1);
1083 case ISPR2HST_FPOST:
1084 isp_intr_async(isp, ASYNC_CMD_CMPLT);
1086 case ISPR2HST_FPOST_CTIO:
1087 isp_intr_async(isp, ASYNC_CTIO_DONE);
1089 case ISPR2HST_RSPQ_UPDATE:
1090 isp_intr_respq(isp);
/* Unknown status: if the RISC is paused, reset it and mask further
 * interrupts; otherwise just log the unexpected value. */
1093 hccr = ISP_READ(isp, HCCR);
1094 if (hccr & HCCR_PAUSE) {
1095 ISP_WRITE(isp, HCCR, HCCR_RESET);
1096 isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
1097 ISP_WRITE(isp, BIU_ICR, 0);
1099 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1102 ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
1103 ISP_WRITE(isp, BIU_SEMA, 0);
/*
 * Interrupt service runner for 24xx and later FC chips.  Same RISC2HOST
 * status decoding as the 2300 path, plus ATIO-queue cases when target mode
 * is compiled in; finishes by clearing the RISC interrupt via the 2400
 * HCCR.
 *
 * NOTE(review): listing is truncated — the switch statement, `break;`s,
 * early return, declarations and the #endif lines are not visible here.
 */
1107 isp_pci_run_isr_2400(ispsoftc_t *isp)
1112 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
1113 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1114 if ((r2hisr & BIU_R2HST_INTR) == 0)
1116 isr = r2hisr & BIU_R2HST_ISTAT_MASK;
1117 info = (r2hisr >> 16);
/* Mailbox completions. */
1119 case ISPR2HST_ROM_MBX_OK:
1120 case ISPR2HST_ROM_MBX_FAIL:
1121 case ISPR2HST_MBX_OK:
1122 case ISPR2HST_MBX_FAIL:
1123 isp_intr_mbox(isp, info);
1125 case ISPR2HST_ASYNC_EVENT:
1126 isp_intr_async(isp, info);
1128 case ISPR2HST_RSPQ_UPDATE:
1129 isp_intr_respq(isp);
1131 case ISPR2HST_RSPQ_UPDATE2:
1132 #ifdef ISP_TARGET_MODE
1133 case ISPR2HST_ATIO_RSPQ_UPDATE:
1135 isp_intr_respq(isp);
1137 #ifdef ISP_TARGET_MODE
1138 case ISPR2HST_ATIO_UPDATE:
1139 case ISPR2HST_ATIO_UPDATE2:
1140 isp_intr_atioq(isp);
1144 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1146 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
1150 isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
1155 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1157 * We will assume that someone has paused the RISC processor.
1159 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1160 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
1161 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1163 rv = BXR2(isp, IspVirt2Off(isp, regoff));
1164 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1165 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1166 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1172 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
1176 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1178 * We will assume that someone has paused the RISC processor.
1180 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1181 BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1182 oldconf | BIU_PCI_CONF1_SXP);
1183 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1185 BXW2(isp, IspVirt2Off(isp, regoff), val);
1186 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1187 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1188 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1189 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1195 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
1197 uint32_t rv, oc = 0;
1199 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1202 * We will assume that someone has paused the RISC processor.
1204 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1205 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1206 if (regoff & SXP_BANK1_SELECT)
1207 tc |= BIU_PCI1080_CONF1_SXP1;
1209 tc |= BIU_PCI1080_CONF1_SXP0;
1210 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1211 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1212 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1213 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1214 BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1215 oc | BIU_PCI1080_CONF1_DMA);
1216 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1218 rv = BXR2(isp, IspVirt2Off(isp, regoff));
1220 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1221 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
/*
 * isp_pci_wr_reg_1080 -- register write for ISP1080/1280-class chips.
 * Mirror of isp_pci_rd_reg_1080: save BIU_CONF1, select the SXP or DMA
 * bank implied by 'regoff', do the 16-bit write of 'val', then restore
 * the original BIU_CONF1 contents.
 * NOTE(review): fragmented listing -- braces/else arms are missing here.
 */
1227 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
/* SXP register space: pick SXP bank 0 or 1, clear the DMA-select bit. */
1231 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1234 * We will assume that someone has paused the RISC processor.
1236 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1237 tc = oc & ~BIU_PCI1080_CONF1_DMA;
1238 if (regoff & SXP_BANK1_SELECT)
1239 tc |= BIU_PCI1080_CONF1_SXP1;
1241 tc |= BIU_PCI1080_CONF1_SXP0;
1242 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1243 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
/* DMA register space: set the DMA-select bit, preserving the rest. */
1244 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1245 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1246 BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1247 oc | BIU_PCI1080_CONF1_DMA);
1248 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
/* Perform the write, then restore the saved bank selection. */
1250 BXW2(isp, IspVirt2Off(isp, regoff), val);
1251 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1253 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1254 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
/*
 * isp_pci_rd_reg_2400 -- register read for 24xx-class chips.
 * The legacy SXP/RISC/DMA register blocks do not exist on these chips:
 * reads of them log an error and return 0xffffffff.  Mailbox registers
 * are 16-bit reads (BXR2); BIU-block registers are 32-bit (BXR4), with
 * R2HSTSHI taking the upper halfword of the 32-bit R2HSTS register.
 * NOTE(review): fragmented listing -- switch headers, break statements
 * and the final return are missing from this excerpt.
 */
1259 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
1262 int block = regoff & _BLK_REG_MASK;
/* Mailbox registers are still 16 bits wide. */
1268 return (BXR2(isp, IspVirt2Off(isp, regoff)));
/* Obsolete register blocks: complain and return all-ones. */
1270 isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff);
1271 return (0xffffffff);
1273 isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff);
1274 return (0xffffffff);
1276 isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff);
1277 return (0xffffffff);
1279 isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff);
1280 return (0xffffffff);
/* 32-bit BIU registers: flash access and request/response queue pointers. */
1284 case BIU2400_FLASH_ADDR:
1285 case BIU2400_FLASH_DATA:
1289 case BIU2400_REQINP:
1290 case BIU2400_REQOUTP:
1291 case BIU2400_RSPINP:
1292 case BIU2400_RSPOUTP:
1293 case BIU2400_PRI_REQINP:
1294 case BIU2400_PRI_REQOUTP:
1295 case BIU2400_ATIO_RSPINP:
1296 case BIU2400_ATIO_RSPOUTP:
1301 rv = BXR4(isp, IspVirt2Off(isp, regoff));
1303 case BIU2400_R2HSTSLO:
1304 rv = BXR4(isp, IspVirt2Off(isp, regoff));
/* R2HSTSHI: the high 16 bits of the 32-bit RISC-to-host status register. */
1306 case BIU2400_R2HSTSHI:
1307 rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
1310 isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
/*
 * isp_pci_wr_reg_2400 -- register write for 24xx-class chips.
 * Mailbox registers get 16-bit writes; legacy SXP/RISC/DMA blocks are
 * invalid and only log an error.  BIU-block registers get 32-bit writes;
 * queue in/out pointer writes use MEMORYBARRIERW (when defined) so the
 * chip observes the new pointer value.
 * NOTE(review): fragmented listing -- switch headers, breaks, #else/#endif
 * for the MEMORYBARRIERW conditional are missing from this excerpt.
 */
1319 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
1321 int block = regoff & _BLK_REG_MASK;
/* Mailbox registers are still 16 bits wide. */
1327 BXW2(isp, IspVirt2Off(isp, regoff), val);
1328 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
/* Obsolete register blocks: complain and drop the write. */
1331 isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff);
1334 isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff);
1337 isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff);
1340 isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff);
/* 32-bit BIU registers: flash access and request/response queue pointers. */
1345 case BIU2400_FLASH_ADDR:
1346 case BIU2400_FLASH_DATA:
1350 case BIU2400_REQINP:
1351 case BIU2400_REQOUTP:
1352 case BIU2400_RSPINP:
1353 case BIU2400_RSPOUTP:
1354 case BIU2400_PRI_REQINP:
1355 case BIU2400_PRI_REQOUTP:
1356 case BIU2400_ATIO_RSPINP:
1357 case BIU2400_ATIO_RSPOUTP:
1362 BXW4(isp, IspVirt2Off(isp, regoff), val);
/* Host-to-chip queue pointer updates need a write barrier when available. */
1363 #ifdef MEMORYBARRIERW
1364 if (regoff == BIU2400_REQINP ||
1365 regoff == BIU2400_RSPOUTP ||
1366 regoff == BIU2400_PRI_REQINP ||
1367 regoff == BIU2400_ATIO_RSPOUTP)
1368 MEMORYBARRIERW(isp, SYNC_REG,
1369 IspVirt2Off(isp, regoff), 4, -1)
1372 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
1375 isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
/*
 * isp_pci_rd_reg_2600 -- register read for 26xx-class chips.
 * On 26xx the request/response/ATIO queue pointers live in a separate
 * shadow-register window read via B2R4() at small fixed offsets
 * (0x00..0x14); the PRI_REQ registers are not supported and only log an
 * error.  Any other register falls through to isp_pci_rd_reg_2400().
 * NOTE(review): fragmented listing -- the switch header, breaks and the
 * final return are missing from this excerpt.
 */
1382 isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff)
/* Priority request queue does not exist on 26xx. */
1387 case BIU2400_PRI_REQINP:
1388 case BIU2400_PRI_REQOUTP:
1389 isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
/* Queue pointers map to the 26xx shadow registers at fixed offsets. */
1393 case BIU2400_REQINP:
1394 rv = B2R4(isp, 0x00);
1396 case BIU2400_REQOUTP:
1397 rv = B2R4(isp, 0x04);
1399 case BIU2400_RSPINP:
1400 rv = B2R4(isp, 0x08);
1402 case BIU2400_RSPOUTP:
1403 rv = B2R4(isp, 0x0c);
1405 case BIU2400_ATIO_RSPINP:
1406 rv = B2R4(isp, 0x10);
1408 case BIU2400_ATIO_RSPOUTP:
1409 rv = B2R4(isp, 0x14);
/* Everything else is handled exactly as on 24xx chips. */
1412 rv = isp_pci_rd_reg_2400(isp, regoff);
/*
 * isp_pci_wr_reg_2600 -- register write for 26xx-class chips.
 * Mirror of isp_pci_rd_reg_2600: queue-pointer writes go to the 26xx
 * shadow-register window via B2W4() at a per-register offset ('off',
 * assigned in lines missing from this excerpt); PRI_REQ registers are
 * unsupported; anything else falls through to isp_pci_wr_reg_2400().
 * NOTE(review): fragmented listing -- the switch header, the 'off'
 * assignments and breaks are missing here.
 */
1419 isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val)
/* Priority request queue does not exist on 26xx. */
1424 case BIU2400_PRI_REQINP:
1425 case BIU2400_PRI_REQOUTP:
1426 isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
/* Queue pointers: each case selects its shadow-window offset. */
1429 case BIU2400_REQINP:
1432 case BIU2400_REQOUTP:
1435 case BIU2400_RSPINP:
1438 case BIU2400_RSPOUTP:
1441 case BIU2400_ATIO_RSPINP:
1444 case BIU2400_ATIO_RSPOUTP:
/* Non-queue registers are handled exactly as on 24xx chips. */
1448 isp_pci_wr_reg_2400(isp, regoff, val);
/* 32-bit write of the new queue pointer into the shadow window. */
1451 B2W4(isp, off, val);
/*
 * imc -- bus_dmamap_load() callback.  Records the load result in the
 * caller's struct imush: on success (error == 0) the physical address of
 * the first (and only) segment is saved in imushp->maddr; the error code
 * is always stored in imushp->error.
 */
1461 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1463 struct imush *imushp = (struct imush *) arg;
/* Assignment-in-condition: stash the error, take the address only if 0. */
1465 if (!(imushp->error = error))
1466 imushp->maddr = segs[0].ds_addr;
/*
 * isp_pci_mbxdma -- allocate and map all shared DMA resources for the HBA:
 * the master DMA tag, request queue (plus external command area on 22xx+),
 * response queue, ATIO queue (target mode), IOCB and scratch areas for FC
 * chips, per-channel nexus free lists, the per-command (pcmd) pool and the
 * handle (xflist) array.  Returns 0 on success, nonzero on failure; the
 * 'bad' path (visible at the bottom) tears everything down via
 * isp_pci_mbxdmafree().
 * NOTE(review): this listing is fragmented -- braces, else arms, gotos,
 * returns and some FC-scratch lines are missing from this excerpt.
 */
1470 isp_pci_mbxdma(ispsoftc_t *isp)
1473 uint32_t len, nsegs;
1474 int i, error, cmap = 0;
1475 bus_size_t slim; /* segment size */
1476 bus_addr_t llim; /* low limit of unavailable dma */
1477 bus_addr_t hlim; /* high limit of unavailable dma */
/* Idempotence guard: bail out if allocation already happened (or failed). */
1481 /* Already been here? If so, leave... */
1482 if (isp->isp_xflist != NULL)
1484 if (isp->isp_rquest != NULL && isp->isp_maxcmds == 0)
1487 if (isp->isp_rquest != NULL)
/*
 * Compute addressing limits and segment counts.  ULTRA2/FC/1240 chips
 * support 64-bit style addressing (when bus_size_t is wide enough);
 * older chips are limited to 32-bit addresses and smaller segments.
 */
1490 hlim = BUS_SPACE_MAXADDR;
1491 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1492 if (sizeof (bus_size_t) > 4)
1493 slim = (bus_size_t) (1ULL << 32);
1495 slim = (bus_size_t) (1UL << 31);
1496 llim = BUS_SPACE_MAXADDR;
1499 llim = BUS_SPACE_MAXADDR_32BIT;
1501 if (sizeof (bus_size_t) > 4)
1502 nsegs = ISP_NSEG64_MAX;
1504 nsegs = ISP_NSEG_MAX;
/* Master DMA tag: parent of all per-object tags created below. */
1506 if (bus_dma_tag_create(bus_get_dma_tag(ISP_PCD(isp)), 1,
1507 slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0,
1508 busdma_lock_mutex, &isp->isp_lock, &isp->isp_osinfo.dmat)) {
1510 isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1515 * Allocate and map the request queue and a region for external
1516 * DMA addressable command/status structures (22XX and later).
1518 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1519 if (isp->isp_type >= ISP_HA_FC_2200)
1520 len += (N_XCMDS * XCMD_SIZE);
1521 if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
1522 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1523 len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
1524 &isp->isp_osinfo.reqdmat)) {
1525 isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
1528 if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
1529 BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
1530 isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
1531 bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
1534 isp->isp_rquest = base;
1536 if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
1537 base, len, imc, &im, 0) || im.error) {
1538 isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
1541 isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
1542 (uintmax_t)im.maddr, (uintmax_t)len);
1543 isp->isp_rquest_dma = im.maddr;
/* The external command (ecmd) area follows the request queue proper. */
1544 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1545 im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1546 if (isp->isp_type >= ISP_HA_FC_2200) {
1547 isp->isp_osinfo.ecmd_dma = im.maddr;
1548 isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base;
1549 isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free;
/* Thread the ecmd slots into a singly linked free list. */
1550 for (ecmd = isp->isp_osinfo.ecmd_free;
1551 ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
1552 if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1])
1555 ecmd->next = ecmd + 1;
1560 * Allocate and map the result queue.
1562 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1563 if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
1564 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1565 len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
1566 &isp->isp_osinfo.respdmat)) {
1567 isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
1570 if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
1571 BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
1572 isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
1573 bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
1576 isp->isp_result = base;
1578 if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
1579 base, len, imc, &im, 0) || im.error) {
1580 isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
1583 isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
1584 (uintmax_t)im.maddr, (uintmax_t)len);
1585 isp->isp_result_dma = im.maddr;
1587 #ifdef ISP_TARGET_MODE
1589 * Allocate and map ATIO queue on 24xx with target mode.
1592 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1593 if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
1594 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1595 len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
1596 &isp->isp_osinfo.atiodmat)) {
1597 isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag");
1600 if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base,
1601 BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) {
1602 isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory");
1603 bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
1606 isp->isp_atioq = base;
1608 if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap,
1609 base, len, imc, &im, 0) || im.error) {
1610 isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error);
1613 isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx",
1614 (uintmax_t)im.maddr, (uintmax_t)len);
1615 isp->isp_atioq_dma = im.maddr;
/* FC chips: small IOCB area (two queue entries, 64-byte aligned). */
1620 if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
1621 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1622 2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, busdma_lock_mutex,
1623 &isp->isp_lock, &isp->isp_osinfo.iocbdmat)) {
1626 if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat,
1627 (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0)
1629 isp->isp_iocb = base;
1631 if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap,
1632 base, 2*QENTRY_LEN, imc, &im, 0) || im.error)
1634 isp->isp_iocb_dma = im.maddr;
/* FC chips: per-channel scratch areas sharing one tag. */
1636 if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
1637 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1638 ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, busdma_lock_mutex,
1639 &isp->isp_lock, &isp->isp_osinfo.scdmat))
1641 for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
1642 struct isp_fc *fc = ISP_FC_PC(isp, cmap);
1643 if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
1644 (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0)
1646 FCPARAM(isp, cmap)->isp_scratch = base;
1648 if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
1649 base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) {
1650 bus_dmamem_free(isp->isp_osinfo.scdmat,
1652 FCPARAM(isp, cmap)->isp_scratch = NULL;
1655 FCPARAM(isp, cmap)->isp_scdma = im.maddr;
/* Pre-populate the per-channel nexus free list (non-2100 chips). */
1656 if (!IS_2100(isp)) {
1657 for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
1658 struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
/* Allocation-failure path: drain what was built so far. */
1660 while (fc->nexus_free_list) {
1661 n = fc->nexus_free_list;
1662 fc->nexus_free_list = n->next;
1667 n->next = fc->nexus_free_list;
1668 fc->nexus_free_list = n;
1674 if (isp->isp_maxcmds == 0) {
/* Per-command pool: one DMA map and watchdog callout per command slot. */
1680 len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
1681 isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *)
1682 malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1683 for (i = 0; i < isp->isp_maxcmds; i++) {
1684 struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
1685 error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
1687 isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
/* Unwind: destroy the maps created before the failure. */
1689 bus_dmamap_destroy(isp->isp_osinfo.dmat,
1690 isp->isp_osinfo.pcmd_pool[i].dmap);
1694 callout_init_mtx(&pcmd->wdog, &isp->isp_lock, 0);
1695 if (i == isp->isp_maxcmds-1)
1698 pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
1700 isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
/* Command-handle array, threaded into a free list via the 'cmd' field. */
1702 len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
1703 isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1704 for (len = 0; len < isp->isp_maxcmds - 1; len++)
1705 isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
1706 isp->isp_xffree = isp->isp_xflist;
/* Error path: release everything allocated above. */
1712 isp_pci_mbxdmafree(isp);
/*
 * isp_pci_mbxdmafree -- release everything isp_pci_mbxdma() set up, in
 * roughly reverse order: handle array, pcmd pool, per-channel FC scratch
 * and nexus lists, IOCB area, ATIO queue (target mode), response queue,
 * request queue.  Each teardown is guarded by a NULL/0 check so the
 * function is safe to call on a partially initialized softc.
 * NOTE(review): fragmented listing -- braces, the FC/IS_FC guard and some
 * free() calls are missing; the interleaving of the scdmat tag destroy
 * with the iocb block at lines 1753/1754 is an artifact of the missing
 * lines, not the original control flow.  Verify against the full file.
 */
1718 isp_pci_mbxdmafree(ispsoftc_t *isp)
1722 if (isp->isp_xflist != NULL) {
1723 free(isp->isp_xflist, M_DEVBUF);
1724 isp->isp_xflist = NULL;
/* Per-command pool: destroy every DMA map before freeing the array. */
1726 if (isp->isp_osinfo.pcmd_pool != NULL) {
1727 for (i = 0; i < isp->isp_maxcmds; i++) {
1728 bus_dmamap_destroy(isp->isp_osinfo.dmat,
1729 isp->isp_osinfo.pcmd_pool[i].dmap);
1731 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1732 isp->isp_osinfo.pcmd_pool = NULL;
/* Per-channel FC resources: scratch DMA and the nexus free list. */
1735 for (i = 0; i < isp->isp_nchan; i++) {
1736 struct isp_fc *fc = ISP_FC_PC(isp, i);
1737 if (FCPARAM(isp, i)->isp_scdma != 0) {
1738 bus_dmamap_unload(isp->isp_osinfo.scdmat,
1740 FCPARAM(isp, i)->isp_scdma = 0;
1742 if (FCPARAM(isp, i)->isp_scratch != NULL) {
1743 bus_dmamem_free(isp->isp_osinfo.scdmat,
1744 FCPARAM(isp, i)->isp_scratch, fc->scmap);
1745 FCPARAM(isp, i)->isp_scratch = NULL;
1747 while (fc->nexus_free_list) {
1748 struct isp_nexus *n = fc->nexus_free_list;
1749 fc->nexus_free_list = n->next;
/* IOCB area: unload, free, destroy tag. */
1753 if (isp->isp_iocb_dma != 0) {
1754 bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
1755 bus_dmamap_unload(isp->isp_osinfo.iocbdmat,
1756 isp->isp_osinfo.iocbmap);
1757 isp->isp_iocb_dma = 0;
1759 if (isp->isp_iocb != NULL) {
1760 bus_dmamem_free(isp->isp_osinfo.iocbdmat,
1761 isp->isp_iocb, isp->isp_osinfo.iocbmap);
1762 bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat);
1765 #ifdef ISP_TARGET_MODE
/* ATIO queue teardown (target mode only). */
1767 if (isp->isp_atioq_dma != 0) {
1768 bus_dmamap_unload(isp->isp_osinfo.atiodmat,
1769 isp->isp_osinfo.atiomap);
1770 isp->isp_atioq_dma = 0;
1772 if (isp->isp_atioq != NULL) {
1773 bus_dmamem_free(isp->isp_osinfo.atiodmat, isp->isp_atioq,
1774 isp->isp_osinfo.atiomap);
1775 bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
1776 isp->isp_atioq = NULL;
/* Response (result) queue teardown. */
1780 if (isp->isp_result_dma != 0) {
1781 bus_dmamap_unload(isp->isp_osinfo.respdmat,
1782 isp->isp_osinfo.respmap);
1783 isp->isp_result_dma = 0;
1785 if (isp->isp_result != NULL) {
1786 bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
1787 isp->isp_osinfo.respmap);
1788 bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
1789 isp->isp_result = NULL;
/* Request queue teardown. */
1791 if (isp->isp_rquest_dma != 0) {
1792 bus_dmamap_unload(isp->isp_osinfo.reqdmat,
1793 isp->isp_osinfo.reqmap);
1794 isp->isp_rquest_dma = 0;
1796 if (isp->isp_rquest != NULL) {
1797 bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
1798 isp->isp_osinfo.reqmap);
1799 bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
1800 isp->isp_rquest = NULL;
/*
 * NOTE(review): fragment of the DMA "mush" bookkeeping structure used by
 * the dma2() callback below; the struct header and remaining members are
 * not visible in this excerpt -- confirm against the full file.
 */
1807 void *rq; /* original request */
/* Sentinel stored in mush_t.error when the request queue has no room. */
1811 #define MUSHERR_NOQENTRIES -2
/*
 * dma2 -- bus_dmamap_load_ccb() callback for isp_pci_dmasetup().
 * Determines transfer direction from the CCB flags (XORed with the
 * target-mode func_code, since direction is reversed for CTIOs), syncs
 * the per-command DMA map, then hands the segment list to isp_send_cmd().
 * A "queue full" result is recorded as MUSHERR_NOQENTRIES in mp->error.
 * NOTE(review): fragmented listing -- else arms, braces and the error
 * checks around isp_send_cmd()'s result are missing from this excerpt.
 */
1814 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1816 mush_t *mp = (mush_t *) arg;
1817 ispsoftc_t *isp= mp->isp;
1818 struct ccb_scsiio *csio = mp->cmd_token;
/* Device-relative direction for the firmware IOCB. */
1829 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1830 ddir = ISP_FROM_DEVICE;
1832 ddir = ISP_TO_DEVICE;
/* CPU-relative sync direction: inverted for continue-target I/O. */
1834 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
1835 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) {
1836 sdir = BUS_DMASYNC_PREREAD;
1838 sdir = BUS_DMASYNC_PREWRITE;
1840 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
/* Build and queue the command IOCB(s) from the loaded segment list. */
1844 error = isp_send_cmd(isp, mp->rq, dm_segs, nseg, XS_XFRLEN(csio),
1845 ddir, (ispds64_t *)csio->req_map);
1848 mp->error = MUSHERR_NOQENTRIES;
/*
 * isp_pci_dmasetup -- map a SCSI CCB's data for DMA and queue the command.
 * Loads the CCB through the per-command DMA map with dma2() as callback,
 * then translates any recorded mush error into a CMD_* return value and
 * CAM status: NOQENTRIES -> CMD_EAGAIN (retry later), EFBIG -> too-big,
 * EINVAL -> invalid request, anything else -> unrecoverable HBA error.
 * Returns CMD_QUEUED on success.
 * NOTE(review): fragmented listing -- the mush_t setup, braces and the
 * error-path return are missing from this excerpt.
 */
1859 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
1866 mp->cmd_token = csio;
/* Load the CCB; dma2() runs synchronously (deferral is not supported). */
1870 error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
1871 (union ccb *)csio, dma2, mp, 0);
1872 if (error == EINPROGRESS) {
1873 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
1875 isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
1876 } else if (error && mp->error == 0) {
1878 isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
/* Translate the recorded error into a driver return code / CAM status. */
1883 int retval = CMD_COMPLETE;
1884 if (mp->error == MUSHERR_NOQENTRIES) {
1885 retval = CMD_EAGAIN;
1886 } else if (mp->error == EFBIG) {
1887 csio->ccb_h.status = CAM_REQ_TOO_BIG;
1888 } else if (mp->error == EINVAL) {
1889 csio->ccb_h.status = CAM_REQ_INVALID;
1891 csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
1895 return (CMD_QUEUED);
/*
 * isp_pci_irqsetup -- allocate and hook up the adapter's interrupt(s).
 * Preference order: MSI-X (up to 3 vectors on 26xx, 2 on 25xx, tunable
 * via the "msix" hint), then single MSI (tunable via "msi"), then a
 * legacy shared INTx line.  Each allocated vector gets its handler:
 * vector 0 -> isp_platform_intr, 1 -> _resp, 2 -> _atio.  Returns 0 if
 * at least one IRQ was set up, nonzero otherwise.
 * NOTE(review): fragmented listing -- braces, pci_release_msi() error
 * paths and the loop-exit breaks are missing from this excerpt.
 */
1899 isp_pci_irqsetup(ispsoftc_t *isp)
1901 device_t dev = isp->isp_osinfo.dev;
1902 struct isp_pcisoftc *pcs = device_get_softc(dev);
1906 /* Allocate IRQs only once. */
1907 if (isp->isp_nirq > 0)
/* Try MSI-X first; the "msix" hint can override the per-chip default. */
1911 if (ISP_CAP_MSIX(isp)) {
1912 max_irq = IS_26XX(isp) ? 3 : (IS_25XX(isp) ? 2 : 0);
1913 resource_int_value(device_get_name(dev),
1914 device_get_unit(dev), "msix", &max_irq);
1915 max_irq = imin(ISP_MAX_IRQS, max_irq);
1916 pcs->msicount = imin(pci_msix_count(dev), max_irq);
1917 if (pcs->msicount > 0 &&
1918 pci_alloc_msix(dev, &pcs->msicount) != 0)
/* Fall back to a single MSI vector (capped by the "msi" hint). */
1921 if (pcs->msicount == 0) {
1923 resource_int_value(device_get_name(dev),
1924 device_get_unit(dev), "msi", &max_irq);
1925 max_irq = imin(1, max_irq);
1926 pcs->msicount = imin(pci_msi_count(dev), max_irq);
1927 if (pcs->msicount > 0 &&
1928 pci_alloc_msi(dev, &pcs->msicount) != 0)
/* Allocate and wire each vector; rid 0 means legacy INTx, 1+ means MSI(-X). */
1931 for (i = 0; i < MAX(1, pcs->msicount); i++) {
1932 pcs->irq[i].iqd = i + (pcs->msicount > 0);
1933 pcs->irq[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1934 &pcs->irq[i].iqd, RF_ACTIVE | RF_SHAREABLE);
1935 if (pcs->irq[i].irq == NULL) {
1936 device_printf(dev, "could not allocate interrupt\n");
/* Handler per vector: 0 = general, 1 = response queue, 2 = ATIO queue. */
1940 f = isp_platform_intr;
1942 f = isp_platform_intr_resp;
1944 f = isp_platform_intr_atio;
1945 if (bus_setup_intr(dev, pcs->irq[i].irq, ISP_IFLAGS, NULL,
1946 f, isp, &pcs->irq[i].ih)) {
1947 device_printf(dev, "could not setup interrupt\n");
1948 (void) bus_release_resource(dev, SYS_RES_IRQ,
1949 pcs->irq[i].iqd, pcs->irq[i].irq);
1952 if (pcs->msicount > 1) {
1953 bus_describe_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih,
1956 isp->isp_nirq = i + 1;
/* Nonzero return means not a single IRQ could be established. */
1960 return (isp->isp_nirq == 0);
/*
 * isp_pci_dumpregs -- debugging aid: print the chip's BIU, DMA-channel,
 * SXP, mailbox and PCI command/status registers to the console.  The RISC
 * is paused (HCCR_CMD_PAUSE) around the CDMA/DDMA/SXP reads and released
 * afterwards.
 * NOTE(review): fragmented listing -- the msg==NULL else, chip-type
 * conditionals and the closing brace fall outside this excerpt.
 */
1964 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
1966 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
/* Header line, with or without the caller-supplied message. */
1968 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
1970 printf("%s:\n", device_get_nameunit(isp->isp_dev));
1972 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1974 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
1975 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
1976 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
1977 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
/* Pause the RISC so the DMA/SXP register banks can be read safely. */
1981 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
1982 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1983 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1984 ISP_READ(isp, CDMA_FIFO_STS));
1985 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1986 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1987 ISP_READ(isp, DDMA_FIFO_STS));
1988 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1989 ISP_READ(isp, SXP_INTERRUPT),
1990 ISP_READ(isp, SXP_GROSS_ERR),
1991 ISP_READ(isp, SXP_PINS_CTRL));
1992 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
1994 printf(" mbox regs: %x %x %x %x %x\n",
1995 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
1996 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
1997 ISP_READ(isp, OUTMAILBOX4));
1998 printf(" PCI Status Command/Status=%x\n",
1999 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));