/*-
 * Copyright (c) 1997-2006 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#if __FreeBSD_version >= 700000
#include <sys/linker.h>
#include <sys/firmware.h>
#endif

#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#else
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

#if __FreeBSD_version < 500000
#define BUS_PROBE_DEFAULT 0
#endif
static uint16_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint16_t);
static uint16_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint16_t);
static int
isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int
isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int
isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint16_t *, uint16_t);
static void
isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint16_t);
static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);
static struct ispmdvec mdvec = {
    BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
    BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
    BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
};

static struct ispmdvec mdvec_2200 = {
};

static struct ispmdvec mdvec_2300 = {
};
#ifndef PCIM_CMD_INVEN
#define PCIM_CMD_INVEN 0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define PCIM_CMD_BUSMASTEREN 0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define PCIM_CMD_PERRESPEN 0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define PCIM_CMD_SEREN 0x0100
#endif
#ifndef PCIM_CMD_INTX_DISABLE
#define PCIM_CMD_INTX_DISABLE 0x0400
#endif
#ifndef PCIR_COMMAND
#define PCIR_COMMAND 0x04
#endif
#ifndef PCIR_CACHELNSZ
#define PCIR_CACHELNSZ 0x0c
#endif
#ifndef PCIR_LATTIMER
#define PCIR_LATTIMER 0x0d
#endif
#ifndef PCIR_ROMADDR
#define PCIR_ROMADDR 0x30
#endif
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP2322
#define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP2422
#define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
#endif
#ifndef PCI_PRODUCT_QLOGIC_ISP6322
#define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
#endif
#define PCI_QLOGIC_ISP1020 \
    ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
    ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP10160 \
    ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160 \
    ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
    ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280 \
    ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
    ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
    ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2300 \
    ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2312 \
    ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2322 \
    ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2422 \
    ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6312 \
    ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6322 \
    ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
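
/*
 * Each PCI_QLOGIC_* constant packs the device and vendor IDs the same
 * way isp_pci_probe() builds its switch key below: e.g.
 * PCI_QLOGIC_ISP1020 is (0x1020 << 16) | 0x1077, i.e. 0x10201077.
 */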
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define AMI_RAID_SUBVENDOR_ID 0x101e

#define IO_MAP_REG 0x10
#define MEM_MAP_REG 0x14

#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10
static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);

struct isp_pcisoftc {
    ispsoftc_t pci_isp;
    device_t pci_dev;
    struct resource * pci_reg;
    bus_space_tag_t pci_st;
    bus_space_handle_t pci_sh;
    void * ih;
    int16_t pci_poff[_NREG_BLKS];
    bus_dma_tag_t dmat;
    bus_dmamap_t *dmaps;
};

static device_method_t isp_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, isp_pci_probe),
    DEVMETHOD(device_attach, isp_pci_attach),
    { 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
    "isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
#if __FreeBSD_version >= 700000
MODULE_DEPEND(isp, ispfw, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
#else
extern ispfwfunc *isp_get_firmware_p;
#endif
static int
isp_pci_probe(device_t dev)
{
    switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
    case PCI_QLOGIC_ISP1020:
        device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
        break;
    case PCI_QLOGIC_ISP1080:
        device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
        break;
    case PCI_QLOGIC_ISP1240:
        device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
        break;
    case PCI_QLOGIC_ISP1280:
        device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
        break;
    case PCI_QLOGIC_ISP10160:
        device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
        break;
    case PCI_QLOGIC_ISP12160:
        if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
            return (ENXIO);
        }
        device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
        break;
    case PCI_QLOGIC_ISP2100:
        device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
        break;
    case PCI_QLOGIC_ISP2200:
        device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
        break;
    case PCI_QLOGIC_ISP2300:
        device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
        break;
    case PCI_QLOGIC_ISP2312:
        device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
        break;
    case PCI_QLOGIC_ISP2322:
        device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
        break;
    case PCI_QLOGIC_ISP2422:
        device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
        break;
    case PCI_QLOGIC_ISP6312:
        device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
        break;
    case PCI_QLOGIC_ISP6322:
        device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
        break;
    default:
        return (ENXIO);
    }
    if (isp_announced == 0 && bootverbose) {
        printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
            "Core Version %d.%d\n",
            ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
            ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
        isp_announced++;
    }
    /*
     * XXXX: Here is where we might load the f/w module
     * XXXX: (or increase a reference count to it).
     */
    return (BUS_PROBE_DEFAULT);
}
#if __FreeBSD_version < 500000
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
    uint64_t wwn;
    int bitmap, unit;

    unit = device_get_unit(dev);
    if (getenv_int("isp_disable", &bitmap)) {
        if (bitmap & (1 << unit)) {
            isp->isp_osinfo.disabled = 1;
            return;
        }
    }

    if (getenv_int("isp_no_fwload", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts |= ISP_CFG_NORELOAD;
    }
    if (getenv_int("isp_fwload", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts &= ~ISP_CFG_NORELOAD;
    }
    if (getenv_int("isp_no_nvram", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts |= ISP_CFG_NONVRAM;
    }
    if (getenv_int("isp_nvram", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts &= ~ISP_CFG_NONVRAM;
    }
    if (getenv_int("isp_fcduplex", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
    }
    if (getenv_int("isp_no_fcduplex", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
    }
    if (getenv_int("isp_nport", &bitmap)) {
        if (bitmap & (1 << unit))
            isp->isp_confopts |= ISP_CFG_NPORT;
    }
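
    /*
     * Each of the tunables above is a per-unit bitmap: e.g. setting the
     * loader tunable isp_no_fwload="2" (a hypothetical value) sets bit 1
     * and so disables firmware loading for unit 1 only.
     */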
    /*
     * Because the resource_*_value functions can neither return
     * 64 bit integer values, nor can they be directly coerced
     * to interpret the right hand side of the assignment as
     * you want them to interpret it, we have to force WWN
     * hint replacement to specify WWN strings with a leading
     * 'w' (e.g. w50000000aaaa0001). Sigh.
     */
    if (getenv_quad("isp_portwwn", &wwn)) {
        isp->isp_osinfo.default_port_wwn = wwn;
        isp->isp_confopts |= ISP_CFG_OWNWWPN;
    }
    if (isp->isp_osinfo.default_port_wwn == 0) {
        isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
    }

    if (getenv_quad("isp_nodewwn", &wwn)) {
        isp->isp_osinfo.default_node_wwn = wwn;
        isp->isp_confopts |= ISP_CFG_OWNWWNN;
    }
    if (isp->isp_osinfo.default_node_wwn == 0) {
        isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
    }

    (void) getenv_int("isp_debug", &bitmap);
    if (bitmap) {
        isp->isp_dblev = bitmap;
    } else {
        isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
    }
    if (bootverbose) {
        isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
    }

#ifdef ISP_FW_CRASH_DUMP
    if (IS_FC(isp)) {
        if (getenv_int("isp_fw_dump_enable", &bitmap)) {
            if (bitmap & (1 << unit)) {
                size_t amt = 0;
                if (IS_2200(isp)) {
                    amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
                } else if (IS_23XX(isp)) {
                    amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
                }
                if (amt) {
                    FCPARAM(isp)->isp_dump_data =
                        malloc(amt, M_DEVBUF, M_WAITOK);
                    memset(FCPARAM(isp)->isp_dump_data, 0, amt);
                } else {
                    device_printf(dev,
                        "f/w crash dumps not supported for card\n");
                }
            }
        }
    }
#endif
    if (getenv_int("role", &bitmap)) {
        isp->isp_role = bitmap;
    } else {
        isp->isp_role = ISP_DEFAULT_ROLES;
    }
}
static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
    int bitmap;
    int unit = device_get_unit(dev);

    *m1 = PCIM_CMD_MEMEN;
    *m2 = PCIM_CMD_PORTEN;
    if (getenv_int("isp_mem_map", &bitmap)) {
        if (bitmap & (1 << unit)) {
            *m1 = PCIM_CMD_MEMEN;
            *m2 = PCIM_CMD_PORTEN;
        }
    }
    if (getenv_int("isp_io_map", &bitmap)) {
        if (bitmap & (1 << unit)) {
            *m1 = PCIM_CMD_PORTEN;
            *m2 = PCIM_CMD_MEMEN;
        }
    }
}
#else
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
    const char *sptr;
    int tval;

    /*
     * Figure out if we're supposed to skip this one.
     */
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "disable", &tval) == 0 && tval) {
        device_printf(dev, "disabled at user request\n");
        isp->isp_osinfo.disabled = 1;
        return;
    }

    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "role", &tval) == 0 && tval != -1) {
        tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
        isp->isp_role = tval;
        device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
    } else {
#ifdef ISP_TARGET_MODE
        isp->isp_role = ISP_ROLE_TARGET;
#else
        isp->isp_role = ISP_DEFAULT_ROLES;
#endif
    }

    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "fwload_disable", &tval) == 0 && tval != 0) {
        isp->isp_confopts |= ISP_CFG_NORELOAD;
    }
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "ignore_nvram", &tval) == 0 && tval != 0) {
        isp->isp_confopts |= ISP_CFG_NONVRAM;
    }
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "fullduplex", &tval) == 0 && tval != 0) {
        isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
    }
#ifdef ISP_FW_CRASH_DUMP
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "fw_dump_enable", &tval) == 0 && tval != 0) {
        size_t amt = 0;
        if (IS_2200(isp)) {
            amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
        } else if (IS_23XX(isp)) {
            amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
        }
        if (amt) {
            FCPARAM(isp)->isp_dump_data =
                malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
        } else {
            device_printf(dev,
                "f/w crash dumps not supported for this model\n");
        }
    }
#endif

    if (resource_string_value(device_get_name(dev), device_get_unit(dev),
        "topology", (const char **) &sptr) == 0 && sptr != 0) {
        if (strcmp(sptr, "lport") == 0) {
            isp->isp_confopts |= ISP_CFG_LPORT;
        } else if (strcmp(sptr, "nport") == 0) {
            isp->isp_confopts |= ISP_CFG_NPORT;
        } else if (strcmp(sptr, "lport-only") == 0) {
            isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
        } else if (strcmp(sptr, "nport-only") == 0) {
            isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
        }
    }

    /*
     * Because the resource_*_value functions can neither return
     * 64 bit integer values, nor can they be directly coerced
     * to interpret the right hand side of the assignment as
     * you want them to interpret it, we have to force WWN
     * hint replacement to specify WWN strings with a leading
     * 'w' (e.g. w50000000aaaa0001). Sigh.
     */
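    /*
     * For example (hypothetical WWNs), in /boot/device.hints:
     *
     *    hint.isp.0.portwwn="w50000000aaaa0001"
     *    hint.isp.0.nodewwn="w50000000aaaa0002"
     */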
    tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
        "portwwn", (const char **) &sptr);
    if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
        char *eptr;
        isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
        if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
            device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
            isp->isp_osinfo.default_port_wwn = 0;
        } else {
            isp->isp_confopts |= ISP_CFG_OWNWWPN;
        }
    }
    if (isp->isp_osinfo.default_port_wwn == 0) {
        isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
    }

    tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
        "nodewwn", (const char **) &sptr);
    if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
        char *eptr;
        isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
        if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
            device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
            isp->isp_osinfo.default_node_wwn = 0;
        } else {
            isp->isp_confopts |= ISP_CFG_OWNWWNN;
        }
    }
    if (isp->isp_osinfo.default_node_wwn == 0) {
        isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
    }

    isp->isp_osinfo.default_id = -1;
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "iid", &tval) == 0) {
        isp->isp_osinfo.default_id = tval;
        isp->isp_confopts |= ISP_CFG_OWNLOOPID;
    }
    if (isp->isp_osinfo.default_id == -1) {
        if (IS_FC(isp)) {
            isp->isp_osinfo.default_id = 109;
        } else {
            isp->isp_osinfo.default_id = 7;
        }
    }

    /*
     * Set up logging levels.
     */
    (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
        "debug", &tval);
    if (tval) {
        isp->isp_dblev = tval;
    } else {
        isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
    }
    if (bootverbose) {
        isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
    }
}
static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
    int tval;
    /*
     * Which we should try first - memory mapping or i/o mapping?
     *
     * We used to try memory first followed by i/o on alpha, otherwise
     * the reverse, but we should just try memory first all the time now.
     */
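    /*
     * e.g. setting hint.isp.0.prefer_iomap="1" in /boot/device.hints
     * asks for I/O space mapping to be tried first for unit 0.
     */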
    *m1 = PCIM_CMD_MEMEN;
    *m2 = PCIM_CMD_PORTEN;

    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "prefer_iomap", &tval) == 0 && tval != 0) {
        *m1 = PCIM_CMD_PORTEN;
        *m2 = PCIM_CMD_MEMEN;
    }
    if (resource_int_value(device_get_name(dev), device_get_unit(dev),
        "prefer_memmap", &tval) == 0 && tval != 0) {
        *m1 = PCIM_CMD_MEMEN;
        *m2 = PCIM_CMD_PORTEN;
    }
}
#endif
static int
isp_pci_attach(device_t dev)
{
    struct resource *regs, *irq;
    int rtp, rgd, iqd, m1, m2;
    uint32_t data, cmd, linesz, psize, basetype;
    struct isp_pcisoftc *pcs;
    ispsoftc_t *isp = NULL;
    struct ispmdvec *mdvp;
#if __FreeBSD_version >= 500000
    int locksetup = 0;
#endif

    pcs = device_get_softc(dev);
    if (pcs == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }
    memset(pcs, 0, sizeof (*pcs));
    pcs->pci_dev = dev;
    isp = &pcs->pci_isp;

    /*
     * Get Generic Options
     */
    isp_get_options(dev, isp);

    /*
     * Check to see if options have us disabled
     */
    if (isp->isp_osinfo.disabled) {
        /*
         * But return zero to preserve unit numbering
         */
        return (0);
    }

    /*
     * Get PCI options- which in this case are just mapping preferences.
     */
    isp_get_pci_options(dev, &m1, &m2);

    linesz = PCI_DFLT_LNSZ;
    irq = regs = NULL;
    rgd = rtp = iqd = 0;

    cmd = pci_read_config(dev, PCIR_COMMAND, 2);
    if (cmd & m1) {
        rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
        rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
        regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
    }
    if (regs == NULL && (cmd & m2)) {
        rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
        rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
        regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
    }
    if (regs == NULL) {
        device_printf(dev, "unable to map any ports\n");
        goto bad;
    }
    if (bootverbose) {
        device_printf(dev, "using %s space register mapping\n",
            (rgd == IO_MAP_REG)? "I/O" : "Memory");
    }
    pcs->pci_st = rman_get_bustag(regs);
    pcs->pci_sh = rman_get_bushandle(regs);

    pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
    pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
    pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
    pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
    pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

    basetype = ISP_HA_SCSI_UNKNOWN;
    psize = sizeof (sdparam);
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
        basetype = ISP_HA_SCSI_UNKNOWN;
        psize = sizeof (sdparam);
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
        basetype = ISP_HA_SCSI_1080;
        psize = sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
        basetype = ISP_HA_SCSI_1240;
        psize = 2 * sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
        basetype = ISP_HA_SCSI_1280;
        psize = 2 * sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
        basetype = ISP_HA_SCSI_10160;
        psize = sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
        basetype = ISP_HA_SCSI_12160;
        psize = 2 * sizeof (sdparam);
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
            ISP1080_DMA_REGS_OFF;
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
        basetype = ISP_HA_FC_2100;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2100_OFF;
        if (pci_get_revid(dev) < 3) {
            /*
             * XXX: Need to get the actual revision
             * XXX: number of the 2100 FB. At any rate,
             * XXX: lower cache line size for early revision
             */
            linesz = 1;
        }
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
        basetype = ISP_HA_FC_2200;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2100_OFF;
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
        basetype = ISP_HA_FC_2300;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2300_OFF;
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
        pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
        basetype = ISP_HA_FC_2312;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2300_OFF;
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
        pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
        basetype = ISP_HA_FC_2322;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2300_OFF;
    }
    if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
        basetype = ISP_HA_FC_2422;
        psize = sizeof (fcparam);
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
            PCI_MBOX_REGS2300_OFF;
    }
    isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (isp->isp_param == NULL) {
        device_printf(dev, "cannot allocate parameter data\n");
        goto bad;
    }
    isp->isp_mdvec = mdvp;
    isp->isp_type = basetype;
    isp->isp_revision = pci_get_revid(dev);

#if __FreeBSD_version >= 700000
    /*
     * Try and find firmware for this device.
     */
    {
        char fwname[32];
        unsigned int did = pci_get_device(dev);

        /*
         * Map a few pci ids to fw names
         */
        switch (did) {
        case PCI_PRODUCT_QLOGIC_ISP1020:
        case PCI_PRODUCT_QLOGIC_ISP1240:
        case PCI_PRODUCT_QLOGIC_ISP10160:
        case PCI_PRODUCT_QLOGIC_ISP12160:
        case PCI_PRODUCT_QLOGIC_ISP6312:
        case PCI_PRODUCT_QLOGIC_ISP2312:
        case PCI_PRODUCT_QLOGIC_ISP6322:
        default:
            break;
        }

        isp->isp_osinfo.fw = NULL;
        if (isp->isp_role & ISP_ROLE_TARGET) {
            snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
            isp->isp_osinfo.fw = firmware_get(fwname);
        }
        if (isp->isp_osinfo.fw == NULL) {
            snprintf(fwname, sizeof (fwname), "isp_%04x", did);
            isp->isp_osinfo.fw = firmware_get(fwname);
        }
        if (isp->isp_osinfo.fw != NULL) {
            union {
                const void *fred;
                void *bob;
            } u;
            u.fred = isp->isp_osinfo.fw->data;
            isp->isp_mdvec->dv_ispfw = u.bob;
        }
    }
#else
    if (isp_get_firmware_p) {
        int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
        (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
        (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
    }
#endif

    /*
     * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
     * are set.
     */
    cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
        PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;

    if (IS_2300(isp)) {	/* per QLogic errata */
        cmd &= ~PCIM_CMD_INVEN;
    }

    /*
     * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
     */
    isp->isp_touched = 1;

    if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
        cmd &= ~PCIM_CMD_INTX_DISABLE;
    }

    pci_write_config(dev, PCIR_COMMAND, cmd, 2);
    /*
     * Make sure the Cache Line Size register is set sensibly.
     */
    data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
    if (data != linesz) {
        data = PCI_DFLT_LNSZ;
        isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
        pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
    }

    /*
     * Make sure the Latency Timer is sane.
     */
    data = pci_read_config(dev, PCIR_LATTIMER, 1);
    if (data < PCI_DFLT_LTNCY) {
        data = PCI_DFLT_LTNCY;
        isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
        pci_write_config(dev, PCIR_LATTIMER, data, 1);
    }

    /*
     * Make sure we've disabled the ROM.
     */
    data = pci_read_config(dev, PCIR_ROMADDR, 4);
    data &= ~1;
    pci_write_config(dev, PCIR_ROMADDR, data, 4);

    irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
        RF_ACTIVE | RF_SHAREABLE);
    if (irq == NULL) {
        device_printf(dev, "could not allocate interrupt\n");
        goto bad;
    }

#if __FreeBSD_version >= 500000
    /* Make sure the lock is set up. */
    mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
    locksetup++;
#endif

    if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
        device_printf(dev, "could not setup interrupt\n");
        goto bad;
    }

    /*
     * Last minute checks...
     */
    isp->isp_port = pci_get_function(dev);

    /*
     * Make sure we're in reset state.
     */
    ISP_LOCK(isp);
    isp_reset(isp);
    if (isp->isp_state != ISP_RESETSTATE) {
        ISP_UNLOCK(isp);
        goto bad;
    }
    isp_init(isp);
    if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
        isp_uninit(isp);
        ISP_UNLOCK(isp);
        goto bad;
    }
    isp_attach(isp);
    if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
        isp_uninit(isp);
        ISP_UNLOCK(isp);
        goto bad;
    }
    /*
     * XXXX: Here is where we might unload the f/w module
     * XXXX: (or decrease the reference count to it).
     */
    ISP_UNLOCK(isp);
    return (0);

bad:
    if (pcs && pcs->ih) {
        (void) bus_teardown_intr(dev, irq, pcs->ih);
    }
#if __FreeBSD_version >= 500000
    if (locksetup && isp) {
        mtx_destroy(&isp->isp_osinfo.lock);
    }
#endif
    if (irq) {
        (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
    }
    if (regs) {
        (void) bus_release_resource(dev, rtp, rgd, regs);
    }
    if (pcs->pci_isp.isp_param) {
#ifdef ISP_FW_CRASH_DUMP
        if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
            free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
        }
#endif
        free(pcs->pci_isp.isp_param, M_DEVBUF);
    }
    /*
     * XXXX: Here is where we might unload the f/w module
     * XXXX: (or decrease the reference count to it).
     */
    return (ENXIO);
}
static void
isp_pci_intr(void *arg)
{
    ispsoftc_t *isp = arg;
    uint16_t isr, sema, mbox;

    ISP_LOCK(isp);
    if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
        isp->isp_intbogus++;
    } else {
        int iok = isp->isp_osinfo.intsok;
        isp->isp_osinfo.intsok = 0;
        isp_intr(isp, isr, sema, mbox);
        isp->isp_osinfo.intsok = iok;
    }
    ISP_UNLOCK(isp);
}
#define IspVirt2Off(a, x) \
    (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
    _BLK_REG_SHFT] + ((x) & 0xfff))

#define BXR2(pcs, off) \
    bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define BXW2(pcs, off, v) \
    bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
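
/*
 * A worked example of the virtual-to-PCI offset mapping above (a reading
 * of the macro, with constants defined elsewhere in ispreg.h): a register
 * code such as OUTMAILBOX0 carries its block in the bits covered by
 * _BLK_REG_MASK and its offset within that block in the low 12 bits, so
 * IspVirt2Off(isp, OUTMAILBOX0) resolves to
 * pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] + (OUTMAILBOX0 & 0xfff), i.e.
 * whatever mailbox window was programmed into pci_poff at attach time.
 */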
static int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    uint16_t val0, val1;
    int i = 0;

    do {
        val0 = BXR2(pcs, IspVirt2Off(isp, off));
        val1 = BXR2(pcs, IspVirt2Off(isp, off));
    } while (val0 != val1 && ++i < 1000);
    if (val0 != val1) {
        return (1);
    }
    *rp = val0;
    return (0);
}
static int
isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp,
    uint16_t *semap, uint16_t *mbp)
{
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    uint16_t isr, sema;

    if (IS_2100(isp)) {
        if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
            return (0);
        }
        if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
            return (0);
        }
    } else {
        isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
        sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
    }
    isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
    isr &= INT_PENDING_MASK(isp);
    sema &= BIU_SEMA_LOCK;
    if (isr == 0 && sema == 0) {
        return (0);
    }
    *isrp = isr;
    if ((*semap = sema) != 0) {
        if (IS_2100(isp)) {
            if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
                return (0);
            }
        } else {
            *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
        }
    }
    return (1);
}
static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
{
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
    uint32_t hccr;
    uint32_t r2hisr;

    if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
        *isrp = 0;
        return (0);
    }
    r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
        IspVirt2Off(pcs, BIU_R2HSTSLO));
    isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
    if ((r2hisr & BIU_R2HST_INTR) == 0) {
        *isrp = 0;
        return (0);
    }
    switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
    case ISPR2HST_ROM_MBX_OK:
    case ISPR2HST_ROM_MBX_FAIL:
    case ISPR2HST_MBX_OK:
    case ISPR2HST_MBX_FAIL:
    case ISPR2HST_ASYNC_EVENT:
        *isrp = r2hisr & 0xffff;
        *mbox0p = (r2hisr >> 16);
        *semap = 1;
        return (1);
    case ISPR2HST_RIO_16:
        *isrp = r2hisr & 0xffff;
        *mbox0p = ASYNC_RIO1;
        *semap = 1;
        return (1);
    case ISPR2HST_FPOST:
        *isrp = r2hisr & 0xffff;
        *mbox0p = ASYNC_CMD_CMPLT;
        *semap = 1;
        return (1);
    case ISPR2HST_FPOST_CTIO:
        *isrp = r2hisr & 0xffff;
        *mbox0p = ASYNC_CTIO_DONE;
        *semap = 1;
        return (1);
    case ISPR2HST_RSPQ_UPDATE:
        *isrp = r2hisr & 0xffff;
        *mbox0p = 0;
        *semap = 0;
        return (1);
    default:
        hccr = ISP_READ(isp, HCCR);
        if (hccr & HCCR_PAUSE) {
            ISP_WRITE(isp, HCCR, HCCR_RESET);
            isp_prt(isp, ISP_LOGERR,
                "RISC paused at interrupt (%x->%x)\n", hccr,
                ISP_READ(isp, HCCR));
        } else {
            isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n",
                r2hisr);
        }
        return (0);
    }
}
static uint16_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
    uint16_t rv, oldconf = 0;
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
        /*
         * We will assume that someone has paused the RISC processor.
         */
        oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
            oldconf | BIU_PCI_CONF1_SXP);
    }
    rv = BXR2(pcs, IspVirt2Off(isp, regoff));
    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
    }
    return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint16_t val)
{
    uint16_t oldconf = 0;
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
        /*
         * We will assume that someone has paused the RISC processor.
         */
        oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
            oldconf | BIU_PCI_CONF1_SXP);
    }
    BXW2(pcs, IspVirt2Off(isp, regoff), val);
    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
    }
}
static uint16_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
    uint16_t rv, oc = 0;
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
        (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
        uint16_t tc;
        /*
         * We will assume that someone has paused the RISC processor.
         */
        oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        tc = oc & ~BIU_PCI1080_CONF1_DMA;
        if (regoff & SXP_BANK1_SELECT)
            tc |= BIU_PCI1080_CONF1_SXP1;
        else
            tc |= BIU_PCI1080_CONF1_SXP0;
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
    } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
        oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
            oc | BIU_PCI1080_CONF1_DMA);
    }
    rv = BXR2(pcs, IspVirt2Off(isp, regoff));
    if (oc) {
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
    }
    return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint16_t val)
{
    uint16_t oc = 0;
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

    if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
        (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
        uint16_t tc;
        /*
         * We will assume that someone has paused the RISC processor.
         */
        oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        tc = oc & ~BIU_PCI1080_CONF1_DMA;
        if (regoff & SXP_BANK1_SELECT)
            tc |= BIU_PCI1080_CONF1_SXP1;
        else
            tc |= BIU_PCI1080_CONF1_SXP0;
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
    } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
        oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
            oc | BIU_PCI1080_CONF1_DMA);
    }
    BXW2(pcs, IspVirt2Off(isp, regoff), val);
    if (oc) {
        BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
    }
}
struct imush {
    ispsoftc_t *isp;
    int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct imush *imushp = (struct imush *) arg;

    if (error) {
        imushp->error = error;
    } else {
        ispsoftc_t *isp = imushp->isp;
        bus_addr_t addr = segs->ds_addr;
        isp->isp_rquest_dma = addr;
        addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        isp->isp_result_dma = addr;
        if (IS_FC(isp)) {
            addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
            FCPARAM(isp)->isp_scdma = addr;
        }
    }
}
/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
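
/*
 * For example, on a configuration where MAXPHYS is 128 * 1024 and
 * PAGE_SIZE is 4096, ISP_NSEGS works out to 33 segments.
 */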
#if __FreeBSD_version < 500000
#define isp_dma_tag_create bus_dma_tag_create
#else
#define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \
    bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
    busdma_lock_mutex, &Giant, z)
#endif
static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
    caddr_t base;
    uint32_t len;
    int i, error, ns;
    bus_size_t slim;	/* segment size */
    bus_addr_t llim;	/* low limit of unavailable dma */
    bus_addr_t hlim;	/* high limit of unavailable dma */
    struct imush im;

    /*
     * Already been here? If so, leave...
     */
    if (isp->isp_rquest) {
        return (0);
    }

    hlim = BUS_SPACE_MAXADDR;
    if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
        slim = (bus_size_t) (1ULL << 32);
        llim = BUS_SPACE_MAXADDR;
    } else {
        llim = BUS_SPACE_MAXADDR_32BIT;
    }

    /*
     * XXX: We don't really support 64 bit target mode for parallel scsi yet
     */
#ifdef ISP_TARGET_MODE
    if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
        isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
        return (1);
    }
#endif

    if (isp_dma_tag_create(bus_get_dma_tag(pcs->pci_dev), 1, slim, llim,
        hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
        &pcs->dmat)) {
        isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
        return (1);
    }

    len = sizeof (XS_T **) * isp->isp_maxcmds;
    isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
    if (isp->isp_xflist == NULL) {
        isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
        return (1);
    }
#ifdef ISP_TARGET_MODE
    len = sizeof (void **) * isp->isp_maxcmds;
    isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
    if (isp->isp_tgtlist == NULL) {
        isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
        return (1);
    }
#endif
    len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
    pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
    if (pcs->dmaps == NULL) {
        isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
        free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
        free(isp->isp_tgtlist, M_DEVBUF);
#endif
        return (1);
    }

    /*
     * Allocate and map the request, result queues, plus FC scratch area.
     */
    len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
    len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
    if (IS_FC(isp)) {
        len += ISP2100_SCRLEN;
    }

    ns = (len / PAGE_SIZE) + 1;
    /*
     * Create a tag for the control spaces- force it to within 32 bits.
     */
    if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
        NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
        isp_prt(isp, ISP_LOGERR,
            "cannot create a dma tag for control spaces");
        free(pcs->dmaps, M_DEVBUF);
        free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
        free(isp->isp_tgtlist, M_DEVBUF);
#endif
        return (1);
    }

    if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
        &isp->isp_cdmap) != 0) {
        isp_prt(isp, ISP_LOGERR,
            "cannot allocate %d bytes of CCB memory", len);
        bus_dma_tag_destroy(isp->isp_cdmat);
        free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
        free(isp->isp_tgtlist, M_DEVBUF);
#endif
        free(pcs->dmaps, M_DEVBUF);
        return (1);
    }

    for (i = 0; i < isp->isp_maxcmds; i++) {
        error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
        if (error) {
            isp_prt(isp, ISP_LOGERR,
                "error %d creating per-cmd DMA maps", error);
            while (--i >= 0) {
                bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
            }
            goto bad;
        }
    }

    im.isp = isp;
    im.error = 0;
    bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
    if (im.error) {
        isp_prt(isp, ISP_LOGERR,
            "error %d loading dma map for control areas", im.error);
        goto bad;
    }

    isp->isp_rquest = base;
    base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
    isp->isp_result = base;
    if (IS_FC(isp)) {
        base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
        FCPARAM(isp)->isp_scratch = base;
    }
    return (0);

bad:
    bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
    bus_dma_tag_destroy(isp->isp_cdmat);
    free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
    free(isp->isp_tgtlist, M_DEVBUF);
#endif
    free(pcs->dmaps, M_DEVBUF);
    isp->isp_rquest = NULL;
    return (1);
}
#define MUSHERR_NOQENTRIES -2

#ifdef ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define STATUS_WITH_DATA 1
static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    mush_t *mp;
    struct ccb_scsiio *csio;
    struct isp_pcisoftc *pcs;
    ct_entry_t *cto, *qe;
    uint8_t scsi_status;
    uint16_t curi, nxti, handle;
    int nth_ctio, nctios, send_status;

    mp = (mush_t *) arg;
    if (error) {
        mp->error = error;
        return;
    }

    csio = mp->cmd_token;
    cto = (ct_entry_t *) mp->rq;
    curi = isp->isp_reqidx;
    qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

    if (nseg == 0) {
        cto->ct_seg_count = 0;
        cto->ct_header.rqs_entry_count = 1;
        MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
        cto->ct_header.rqs_seqno = 1;
        isp_prt(isp, ISP_LOGTDEBUG1,
            "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
            cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
            cto->ct_tag_val, cto->ct_flags, cto->ct_status,
            cto->ct_scsi_status, cto->ct_resid);
        ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
        isp_put_ctio(isp, cto, qe);
        return;
    }

    nctios = nseg / ISP_RQDSEG;
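    /*
     * Together with the remainder check below this is a ceiling
     * division: e.g. with a hypothetical ISP_RQDSEG of 4, nseg == 10
     * would yield nctios == 3.
     */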
    if (nseg % ISP_RQDSEG) {
        nctios++;
    }

    /*
     * Save syshandle, and potentially any SCSI status, which we'll
     * reinsert on the last CTIO we're going to send.
     */
    handle = cto->ct_syshandle;
    cto->ct_syshandle = 0;
    cto->ct_header.rqs_seqno = 0;
    send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

    if (send_status) {
        sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
        cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
        /*
         * Preserve residual.
         */
        resid = cto->ct_resid;

        /*
         * Save actual SCSI status.
         */
        scsi_status = cto->ct_scsi_status;

#ifndef STATUS_WITH_DATA
        sflags |= CT_NO_DATA;
        /*
         * We can't do a status at the same time as a data CTIO, so
         * we need to synthesize an extra CTIO at this level.
         */
#endif
    } else {
        sflags = scsi_status = resid = 0;
    }

    cto->ct_scsi_status = 0;

    pcs = (struct isp_pcisoftc *)isp;
    dp = &pcs->dmaps[isp_handle_index(handle)];
    if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
    } else {
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
    }

    for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
        if (seglim > ISP_RQDSEG)
            seglim = ISP_RQDSEG;

        for (seg = 0; seg < seglim; seg++, nseg--) {
            /*
             * Unlike normal initiator commands, we don't
             * do any swizzling here.
             */
            cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
            cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
            cto->ct_xfrlen += dm_segs->ds_len;
        }
        cto->ct_seg_count = seg;

        /*
         * This case should only happen when we're sending an
         * extra CTIO with final status.
         */
        if (send_status == 0) {
            isp_prt(isp, ISP_LOGWARN,
                "tdma_mk ran out of segments");
        }

        /*
         * At this point, the fields ct_lun, ct_iid, ct_tagval,
         * ct_tagtype, and ct_timeout have been carried over
         * unchanged from what our caller had set.
         *
         * The dataseg fields and the seg_count fields we just got
         * through setting. The data direction we've preserved all
         * along and only clear it if we're now sending status.
         */

        if (nth_ctio == nctios - 1) {
            /*
             * We're the last in a sequence of CTIOs, so mark
             * this CTIO and save the handle to the CCB such that
             * when this CTIO completes we can free dma resources
             * and do whatever else we need to do to finish the
             * rest of the command. We *don't* give this to the
             * firmware to work on- the caller will do that.
             */
            cto->ct_syshandle = handle;
            cto->ct_header.rqs_seqno = 1;

            cto->ct_scsi_status = scsi_status;
            cto->ct_flags |= sflags;
            cto->ct_resid = resid;

            isp_prt(isp, ISP_LOGTDEBUG1,
                "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
                "scsi status %x resid %d",
                cto->ct_fwhandle, csio->ccb_h.target_lun,
                cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
                cto->ct_scsi_status, cto->ct_resid);
        } else {
            isp_prt(isp, ISP_LOGTDEBUG1,
                "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
                cto->ct_fwhandle, csio->ccb_h.target_lun,
                cto->ct_iid, cto->ct_tag_val,
                cto->ct_flags);

            isp_put_ctio(isp, cto, qe);
            ISP_TDQE(isp, "last tdma_mk", curi, cto);

            MEMORYBARRIER(isp, SYNC_REQUEST,
        } else {
            ct_entry_t *oqe = qe;

            /*
             * Make sure syshandle fields are clean
             */
            cto->ct_syshandle = 0;
            cto->ct_header.rqs_seqno = 0;

            isp_prt(isp, ISP_LOGTDEBUG1,
                "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
                cto->ct_fwhandle, csio->ccb_h.target_lun,
                cto->ct_iid, cto->ct_flags);

            /*
             * Get a new CTIO
             */
            qe = (ct_entry_t *)
                ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
            nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
            if (nxti == mp->optr) {
                isp_prt(isp, ISP_LOGTDEBUG0,
                    "Queue Overflow in tdma_mk");
                mp->error = MUSHERR_NOQENTRIES;
                return;
            }

            /*
             * Now that we're done with the old CTIO,
             * flush it out to the request queue.
             */
            ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
            isp_put_ctio(isp, cto, oqe);
            if (nth_ctio != 0) {
                MEMORYBARRIER(isp, SYNC_REQUEST, curi,
                    QENTRY_LEN);
            }
            curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

            /*
             * Reset some fields in the CTIO so we can reuse
             * for the next one we'll flush to the request
             * queue.
             */
            cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
            cto->ct_header.rqs_entry_count = 1;
            cto->ct_header.rqs_flags = 0;
            cto->ct_scsi_status = 0;
            cto->ct_seg_count = 0;
            MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
        }
    }
}
/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */
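
/*
 * In other words (a reading of the code below, not new behavior): one
 * CTIO2/CTIO3 carries the first seglim data segments, and each
 * continuation entry (RQSTYPE_DATASEG, or RQSTYPE_A64_CONT for 64-bit
 * addresses) appended after it carries the next batch, with only
 * rqs_entry_count bumped along the way.
 */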
static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    mush_t *mp;
    struct ccb_scsiio *csio;
    ct2_entry_t *cto, *qe;
    uint16_t curi, nxti;

    mp = (mush_t *) arg;
    if (error) {
        mp->error = error;
        return;
    }

    csio = mp->cmd_token;
    cto = (ct2_entry_t *) mp->rq;

    curi = isp->isp_reqidx;
    qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

    if (nseg == 0) {
        if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
            isp_prt(isp, ISP_LOGWARN,
                "dma2_tgt_fc, a status CTIO2 without MODE1 "
                "set (0x%x)", cto->ct_flags);
            mp->error = EINVAL;
            return;
        }
        /*
         * We preserve ct_lun, ct_iid, ct_rxid. We set the data
         * flags to NO DATA and clear relative offset flags.
         * We preserve the ct_resid and the response area.
         */
        cto->ct_header.rqs_seqno = 1;
        cto->ct_seg_count = 0;
        isp_prt(isp, ISP_LOGTDEBUG1,
            "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
            "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
            cto->ct_iid, cto->ct_flags, cto->ct_status,
            cto->rsp.m1.ct_scsi_status, cto->ct_resid);
        if (IS_2KLOGIN(isp)) {
            isp_put_ctio2e(isp,
                (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
        } else {
            isp_put_ctio2(isp, cto, qe);
        }
        ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
        return;
    }

    if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
        isp_prt(isp, ISP_LOGERR,
            "dma2_tgt_fc, a data CTIO2 without MODE0 set "
            "(0x%x)", cto->ct_flags);
        mp->error = EINVAL;
        return;
    }
    /*
     * Check to see if we need DAC addressing or not.
     *
     * Any address that's over the 4GB boundary causes this
     */
    if (sizeof (bus_addr_t) > 4) {
        for (segcnt = 0; segcnt < nseg; segcnt++) {
            uint64_t addr = dm_segs[segcnt].ds_addr;
            if (addr >= 0x100000000LL) {
                break;
            }
        }
    }
    if (segcnt != nseg) {
        cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
        seglim = ISP_RQDSEG_T3;
        ds64 = &cto->rsp.m0.ct_dataseg64[0];
    } else {
        seglim = ISP_RQDSEG_T2;
        ds = &cto->rsp.m0.ct_dataseg[0];
    }
    cto->ct_seg_count = 0;

    /*
     * Set up the CTIO2 data segments.
     */
    for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
        cto->ct_seg_count++, segcnt++) {
        if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
            ds64->ds_basehi =
                ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
            ds64->ds_base = dm_segs[segcnt].ds_addr;
            ds64->ds_count = dm_segs[segcnt].ds_len;
            ds64++;
        } else {
            ds->ds_base = dm_segs[segcnt].ds_addr;
            ds->ds_count = dm_segs[segcnt].ds_len;
            ds++;
        }
        cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
#if __FreeBSD_version < 500000
        isp_prt(isp, ISP_LOGTDEBUG1,
            "isp_send_ctio2: ent0[%d]0x%llx:%llu",
            cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
            (uint64_t)dm_segs[segcnt].ds_len);
#else
        isp_prt(isp, ISP_LOGTDEBUG1,
            "isp_send_ctio2: ent0[%d]0x%jx:%ju",
            cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
            (uintmax_t)dm_segs[segcnt].ds_len);
#endif
    }

    while (segcnt < nseg) {
        ispcontreq_t local, *crq = &local, *qep;

        qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
        nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
        if (nxti == mp->optr) {
            isp_prt(isp, ISP_LOGTDEBUG0,
                "tdma_mkfc: request queue overflow");
            mp->error = MUSHERR_NOQENTRIES;
            return;
        }
        cto->ct_header.rqs_entry_count++;
        MEMZERO((void *)crq, sizeof (*crq));
        crq->req_header.rqs_entry_count = 1;
        if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
            seglim = ISP_CDSEG64;
            ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
            crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
        } else {
            seglim = ISP_CDSEG;
            ds = &crq->req_dataseg[0];
            crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
        }
        for (seg = 0; segcnt < nseg && seg < seglim;
            seg++, segcnt++) {
            if (crq->req_header.rqs_entry_type == RQSTYPE_A64_CONT) {
                ds64->ds_basehi =
                    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
                ds64->ds_base = dm_segs[segcnt].ds_addr;
                ds64->ds_count = dm_segs[segcnt].ds_len;
                ds64++;
            } else {
                ds->ds_base = dm_segs[segcnt].ds_addr;
                ds->ds_count = dm_segs[segcnt].ds_len;
                ds++;
            }
#if __FreeBSD_version < 500000
            isp_prt(isp, ISP_LOGTDEBUG1,
                "isp_send_ctio2: ent%d[%d]%llx:%llu",
                cto->ct_header.rqs_entry_count-1, seg,
                (uint64_t)dm_segs[segcnt].ds_addr,
                (uint64_t)dm_segs[segcnt].ds_len);
#else
            isp_prt(isp, ISP_LOGTDEBUG1,
                "isp_send_ctio2: ent%d[%d]%jx:%ju",
                cto->ct_header.rqs_entry_count-1, seg,
                (uintmax_t)dm_segs[segcnt].ds_addr,
                (uintmax_t)dm_segs[segcnt].ds_len);
#endif
            cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
            cto->ct_seg_count++;
        }
        MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
        isp_put_cont_req(isp, crq, qep);
        ISP_TDQE(isp, "cont entry", curi, qep);
    }
    /*
     * Now do the final twiddling for the CTIO itself.
     */
    cto->ct_header.rqs_seqno = 1;
    isp_prt(isp, ISP_LOGTDEBUG1,
        "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
        cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
        cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
        cto->ct_resid);
    if (IS_2KLOGIN(isp))
        isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
    else
        isp_put_ctio2(isp, cto, qe);
    ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
}
#endif
static void dma2_a64(void *, bus_dma_segment_t *, int, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    mush_t *mp;
    struct ccb_scsiio *csio;
    struct isp_pcisoftc *pcs;
    bus_dma_segment_t *eseg;
    int seglim, datalen;

    mp = (mush_t *) arg;
    if (error) {
        mp->error = error;
        return;
    }

    if (nseg < 1) {
        isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
        mp->error = EFAULT;
        return;
    }
    csio = mp->cmd_token;

    pcs = (struct isp_pcisoftc *)mp->isp;
    dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];

    if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
    } else {
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
    }

    datalen = XS_XFRLEN(csio);

    /*
     * We're passed an initial partially filled in entry that
     * has most fields filled in except for data transfer
     * directions and data transfer information.
     *
     * Our job is to fill in the initial request queue entry and
     * then to start allocating and filling in continuation entries
     * until we've covered the entire transfer.
     */
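
    /*
     * Sketch of the layout (hypothetical sizes): a transfer mapped
     * into, say, 11 segments with a request-entry limit (seglim) of 2
     * puts the first 2 segments into the request entry itself and
     * spills the other 9 into A64 continuation entries, each holding
     * up to ISP_CDSEG64 segments, bumping rqs_entry_count for every
     * continuation added.
     */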
    if (IS_FC(isp)) {
        rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
        seglim = ISP_RQDSEG_T3;
        ((ispreqt3_t *)rq)->req_totalcnt = datalen;
        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
        } else {
            ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
        }
    } else {
        rq->req_header.rqs_entry_type = RQSTYPE_A64;
        if (csio->cdb_len > 12) {
            seglim = 0;
        } else {
            seglim = ISP_RQDSEG_A64;
        }
        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            rq->req_flags |= REQFLAG_DATA_IN;
        } else {
            rq->req_flags |= REQFLAG_DATA_OUT;
        }
    }

    eseg = dm_segs + nseg;

    while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
        if (IS_FC(isp)) {
            ispreqt3_t *rq3 = (ispreqt3_t *)rq;
            rq3->req_dataseg[rq3->req_seg_count].ds_base =
                DMA_LO32(dm_segs->ds_addr);
            rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
                DMA_HI32(dm_segs->ds_addr);
            rq3->req_dataseg[rq3->req_seg_count].ds_count =
                dm_segs->ds_len;
        } else {
            rq->req_dataseg[rq->req_seg_count].ds_base =
                DMA_LO32(dm_segs->ds_addr);
            rq->req_dataseg[rq->req_seg_count].ds_basehi =
                DMA_HI32(dm_segs->ds_addr);
            rq->req_dataseg[rq->req_seg_count].ds_count =
                dm_segs->ds_len;
        }
        datalen -= dm_segs->ds_len;
        rq->req_seg_count++;
        dm_segs++;
    }

    while (datalen > 0 && dm_segs != eseg) {
        ispcontreq64_t local, *crq = &local, *cqe;

        cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
        nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
        if (nxti == mp->optr) {
            isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
            mp->error = MUSHERR_NOQENTRIES;
            return;
        }
        rq->req_header.rqs_entry_count++;
        MEMZERO((void *)crq, sizeof (*crq));
        crq->req_header.rqs_entry_count = 1;
        crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

        seglim = 0;
        while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
            crq->req_dataseg[seglim].ds_base =
                DMA_LO32(dm_segs->ds_addr);
            crq->req_dataseg[seglim].ds_basehi =
                DMA_HI32(dm_segs->ds_addr);
            crq->req_dataseg[seglim].ds_count =
                dm_segs->ds_len;
            rq->req_seg_count++;
            seglim++;
            datalen -= dm_segs->ds_len;
            dm_segs++;
        }
        isp_put_cont64_req(isp, crq, cqe);
        MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
    }
    *mp->nxtip = nxti;
}
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    mush_t *mp;
    struct ccb_scsiio *csio;
    struct isp_pcisoftc *pcs;
    bus_dma_segment_t *eseg;
    int seglim, datalen;

    mp = (mush_t *) arg;
    if (error) {
        mp->error = error;
        return;
    }

    if (nseg < 1) {
        isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
        mp->error = EFAULT;
        return;
    }
    csio = mp->cmd_token;

    pcs = (struct isp_pcisoftc *)mp->isp;
    dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];

    if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
    } else {
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
    }

    datalen = XS_XFRLEN(csio);

    /*
     * We're passed an initial partially filled in entry that
     * has most fields filled in except for data transfer
     * directions and data transfer information.
     *
     * Our job is to fill in the initial request queue entry and
     * then to start allocating and filling in continuation entries
     * until we've covered the entire transfer.
     */
    if (IS_FC(isp)) {
        seglim = ISP_RQDSEG_T2;
        ((ispreqt2_t *)rq)->req_totalcnt = datalen;
        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
        } else {
            ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
        }
    } else {
        if (csio->cdb_len > 12) {
            seglim = 0;
        } else {
            seglim = ISP_RQDSEG;
        }
        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            rq->req_flags |= REQFLAG_DATA_IN;
        } else {
            rq->req_flags |= REQFLAG_DATA_OUT;
        }
    }

    eseg = dm_segs + nseg;

    while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
        if (IS_FC(isp)) {
            ispreqt2_t *rq2 = (ispreqt2_t *)rq;
            rq2->req_dataseg[rq2->req_seg_count].ds_base =
                DMA_LO32(dm_segs->ds_addr);
            rq2->req_dataseg[rq2->req_seg_count].ds_count =
                dm_segs->ds_len;
        } else {
            rq->req_dataseg[rq->req_seg_count].ds_base =
                DMA_LO32(dm_segs->ds_addr);
            rq->req_dataseg[rq->req_seg_count].ds_count =
                dm_segs->ds_len;
        }
        datalen -= dm_segs->ds_len;
        rq->req_seg_count++;
        dm_segs++;
    }

    while (datalen > 0 && dm_segs != eseg) {
        ispcontreq_t local, *crq = &local, *cqe;

        cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
        nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
        if (nxti == mp->optr) {
            isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
            mp->error = MUSHERR_NOQENTRIES;
            return;
        }
        rq->req_header.rqs_entry_count++;
        MEMZERO((void *)crq, sizeof (*crq));
        crq->req_header.rqs_entry_count = 1;
        crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

        seglim = 0;
        while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
            crq->req_dataseg[seglim].ds_base =
                DMA_LO32(dm_segs->ds_addr);
            crq->req_dataseg[seglim].ds_count =
                dm_segs->ds_len;
            rq->req_seg_count++;
            seglim++;
            datalen -= dm_segs->ds_len;
            dm_segs++;
        }
        isp_put_cont_req(isp, crq, cqe);
        MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
    }
    *mp->nxtip = nxti;
}
/*
 * We enter with ISP_LOCK held
 */
static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    uint16_t *nxtip, uint16_t optr)
{
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
    bus_dmamap_t *dp = NULL;
    void (*eptr)(void *, bus_dma_segment_t *, int, int);

    qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
    if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
            (csio->dxfer_len == 0)) {
            mp->cmd_token = csio;
            mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
            ISPLOCK_2_CAMLOCK(isp);
            (*eptr)(mp, NULL, 0, 0);
            CAMLOCK_2_ISPLOCK(isp);
        }
    }
#endif

    if (sizeof (bus_addr_t) > 4) {
        eptr = dma2_a64;
    } else {
        eptr = dma2;
    }

    if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
        (csio->dxfer_len == 0)) {
        rq->req_seg_count = 1;
        goto mbxsync;
    }

    /*
     * Do a virtual grapevine step to collect info for
     * the callback dma allocation that we have to use...
     */
    mp = &mush;
    mp->isp = isp;
    mp->cmd_token = csio;
    mp->rq = rq;
    mp->nxtip = nxtip;
    mp->optr = optr;
    mp->error = 0;

    ISPLOCK_2_CAMLOCK(isp);
    if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
        if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
            dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
            error = bus_dmamap_load(pcs->dmat, *dp,
                csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
            if (error == EINPROGRESS) {
                bus_dmamap_unload(pcs->dmat, *dp);
                mp->error = EINVAL;
                isp_prt(isp, ISP_LOGERR,
                    "deferred dma allocation not supported");
            } else if (error && mp->error == 0) {
                mp->error = error;
                isp_prt(isp, ISP_LOGERR,
                    "error %d in dma mapping code", error);
            }
        } else {
            /* Pointer to physical buffer */
            struct bus_dma_segment seg;
            seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
            seg.ds_len = csio->dxfer_len;
            (*eptr)(mp, &seg, 1, 0);
        }
    } else {
        struct bus_dma_segment *segs;

        if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
            isp_prt(isp, ISP_LOGERR,
                "Physical segment pointers unsupported");
            mp->error = EINVAL;
        } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
            isp_prt(isp, ISP_LOGERR,
                "Virtual segment addresses unsupported");
            mp->error = EINVAL;
        } else {
            /* Just use the segments provided */
            segs = (struct bus_dma_segment *) csio->data_ptr;
            (*eptr)(mp, segs, csio->sglist_cnt, 0);
        }
    }
    CAMLOCK_2_ISPLOCK(isp);
    if (mp->error) {
        int retval = CMD_COMPLETE;
        if (mp->error == MUSHERR_NOQENTRIES) {
            retval = CMD_EAGAIN;
        } else if (mp->error == EFBIG) {
            XS_SETERR(csio, CAM_REQ_TOO_BIG);
        } else if (mp->error == EINVAL) {
            XS_SETERR(csio, CAM_REQ_INVALID);
        } else {
            XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
        }
        return (retval);
    }

mbxsync:
    switch (rq->req_header.rqs_entry_type) {
    case RQSTYPE_REQUEST:
        isp_put_request(isp, rq, qep);
        break;
    case RQSTYPE_CMDONLY:
        isp_put_extended_request(isp, (ispextreq_t *)rq,
            (ispextreq_t *)qep);
        break;
    case RQSTYPE_T2RQS:
        isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
        break;
    case RQSTYPE_A64:
    case RQSTYPE_T3RQS:
        isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
        break;
    }
    return (CMD_QUEUED);
}
static void
isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint16_t handle)
{
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
    bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];

    if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
    } else {
        bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
    }
    bus_dmamap_unload(pcs->dmat, *dp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
    /* Make sure the BIOS is disabled */
    isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
    /* and enable interrupts */
static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
    struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;

    if (msg)
        printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
    else
        printf("%s:\n", device_get_nameunit(isp->isp_dev));
    if (IS_SCSI(isp))
        printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
    else
        printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
    printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
        ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
    printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

    if (IS_SCSI(isp)) {
        ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
        printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
            ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
            ISP_READ(isp, CDMA_FIFO_STS));
        printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
            ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
            ISP_READ(isp, DDMA_FIFO_STS));
        printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
            ISP_READ(isp, SXP_INTERRUPT),
            ISP_READ(isp, SXP_GROSS_ERR),
            ISP_READ(isp, SXP_PINS_CTRL));
        ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
    }
    printf("    mbox regs: %x %x %x %x %x\n",
        ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
        ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
        ISP_READ(isp, OUTMAILBOX4));
    printf("    PCI Status Command/Status=%x\n",
        pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}