2 * Copyright (c) 1997 by Simon Shapiro
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * dpt_scsi.c: SCSI dependent code for the DPT driver
36 * credits: Assisted by Mike Neuffer in the early low level DPT code
37 * Thanx to Mark Salyzyn of DPT for his assistance.
38 * Special thanx to Justin Gibbs for invaluable help in
39 * making this driver look and work like a FreeBSD component.
40 * Last but not least, many thanx to UCB and the FreeBSD
41 * team for creating and maintaining such a wonderful O/S.
43 * TODO: * Add ISA probe code.
44 * * Add driver-level RAID-0. This will allow interoperability with
45 * NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID
46 * arrays that span controllers (Wow!).
54 #include <sys/param.h>
55 #include <sys/systm.h>
57 #include <sys/eventhandler.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
63 #include <machine/bus.h>
65 #include <machine/resource.h>
70 #include <cam/cam_ccb.h>
71 #include <cam/cam_sim.h>
72 #include <cam/cam_xpt_sim.h>
73 #include <cam/cam_debug.h>
74 #include <cam/scsi/scsi_all.h>
75 #include <cam/scsi/scsi_message.h>
80 #include <dev/dpt/dpt.h>
82 /* dpt_isa.c, dpt_eisa.c, and dpt_pci.c need this in a central place */
83 devclass_t dpt_devclass;
85 #define microtime_now dpt_time_now()
87 #define dpt_inl(dpt, port) \
88 bus_read_4((dpt)->io_res, (dpt)->io_offset + port)
89 #define dpt_inb(dpt, port) \
90 bus_read_1((dpt)->io_res, (dpt)->io_offset + port)
91 #define dpt_outl(dpt, port, value) \
92 bus_write_4((dpt)->io_res, (dpt)->io_offset + port, value)
93 #define dpt_outb(dpt, port, value) \
94 bus_write_1((dpt)->io_res, (dpt)->io_offset + port, value)
97 * These will have to be set up by parameters passed at boot/load time. For
98 * performance reasons, we make them constants for the time being.
100 #define dpt_min_segs DPT_MAX_SEGS
101 #define dpt_max_segs DPT_MAX_SEGS
103 /* Definitions for our use of the SIM private CCB area */
104 #define ccb_dccb_ptr spriv_ptr0
105 #define ccb_dpt_ptr spriv_ptr1
107 /* ================= Private Inline Function declarations ===================*/
108 static __inline int dpt_just_reset(dpt_softc_t * dpt);
109 static __inline int dpt_raid_busy(dpt_softc_t * dpt);
111 static __inline int dpt_pio_wait (u_int32_t, u_int, u_int, u_int);
113 static __inline int dpt_wait(dpt_softc_t *dpt, u_int bits,
115 static __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt);
116 static __inline void dptfreeccb(struct dpt_softc *dpt,
117 struct dpt_ccb *dccb);
118 static __inline bus_addr_t dptccbvtop(struct dpt_softc *dpt,
119 struct dpt_ccb *dccb);
121 static __inline int dpt_send_immediate(dpt_softc_t *dpt,
122 eata_ccb_t *cmd_block,
123 u_int32_t cmd_busaddr,
125 u_int ifc, u_int code,
128 /* ==================== Private Function declarations =======================*/
129 static void dptmapmem(void *arg, bus_dma_segment_t *segs,
130 int nseg, int error);
132 static struct sg_map_node*
133 dptallocsgmap(struct dpt_softc *dpt);
135 static int dptallocccbs(dpt_softc_t *dpt);
137 static int dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb,
138 u_int32_t dccb_busaddr, u_int size,
139 u_int page, u_int target, int extent);
140 static void dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb,
141 u_int32_t dccb_busaddr,
144 static void dpt_poll(struct cam_sim *sim);
145 static void dpt_intr_locked(dpt_softc_t *dpt);
147 static void dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
148 int nseg, int error);
150 static void dpt_action(struct cam_sim *sim, union ccb *ccb);
152 static int dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd,
153 u_int32_t cmd_busaddr,
154 u_int command, u_int retries,
155 u_int ifc, u_int code,
157 static void dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb,
158 union ccb *ccb, u_int hba_stat,
159 u_int scsi_stat, u_int32_t resid);
161 static void dpttimeout(void *arg);
162 static void dptshutdown(void *arg, int howto);
164 /* ================= Private Inline Function definitions ====================*/
/*
 * dpt_just_reset: probe status ports 2-5 for the ASCII signature "DPTH",
 * which the controller presents immediately after a reset.  Returns
 * non-zero when the signature is present (return paths are outside the
 * visible span).
 */
166 dpt_just_reset(dpt_softc_t * dpt)
168 if ((dpt_inb(dpt, 2) == 'D')
169 && (dpt_inb(dpt, 3) == 'P')
170 && (dpt_inb(dpt, 4) == 'T')
171 && (dpt_inb(dpt, 5) == 'H'))
/*
 * dpt_raid_busy: returns non-zero when ports 0-2 read back "DPT",
 * which the firmware presents while RAID members are still spinning up
 * (return paths are outside the visible span).
 */
178 dpt_raid_busy(dpt_softc_t * dpt)
180 if ((dpt_inb(dpt, 0) == 'D')
181 && (dpt_inb(dpt, 1) == 'P')
182 && (dpt_inb(dpt, 2) == 'T'))
/*
 * dpt_pio_wait: poll a raw I/O-port register (pre-softc, PIO probe path)
 * until the masked bits reach the desired state.  Polls up to 20000
 * iterations; the per-iteration delay and the success/timeout returns
 * are outside the visible span.
 */
190 dpt_pio_wait (u_int32_t base, u_int reg, u_int bits, u_int state)
195 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */
196 c = inb(base + reg) & bits;
/*
 * dpt_wait: softc-based variant of dpt_pio_wait.  Polls the HBA status
 * register (HA_RSTATUS) up to 20000 iterations until (status & bits)
 * matches the requested state; returns are outside the visible span.
 */
207 dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state)
212 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */
213 c = dpt_inb(dpt, HA_RSTATUS) & bits;
/*
 * dptgetccb: pop a DCCB from the free list, growing the pool (up to
 * max_dccbs) when the list is empty.  Returns NULL on allocation
 * failure.  Caller must hold the softc lock.
 */
222 static __inline struct dpt_ccb*
223 dptgetccb(struct dpt_softc *dpt)
225 struct dpt_ccb* dccb;
228 mtx_assert(&dpt->lock, MA_OWNED);
229 if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) {
230 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
232 } else if (dpt->total_dccbs < dpt->max_dccbs) {
/* Pool not yet at its limit: grow it (the dptallocccbs() call that
 * refills the free list sits in an elided line), then retry the list. */
234 dccb = SLIST_FIRST(&dpt->free_dccb_list);
236 device_printf(dpt->dev, "Can't malloc DCCB\n");
238 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
/*
 * dptfreeccb: return a DCCB to the free list.  If it was active, unlink
 * its CAM CCB from the pending list; if a SIMQ release is owed (either
 * flagged on this DCCB or because of a recorded resource shortage),
 * set CAM_RELEASE_SIMQ on the CCB so CAM restarts the queue.
 * Caller must hold the softc lock.
 */
247 dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb)
251 mtx_assert(&dpt->lock, MA_OWNED);
252 if ((dccb->state & DCCB_ACTIVE) != 0)
253 LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le);
254 if ((dccb->state & DCCB_RELEASE_SIMQ) != 0)
255 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
256 else if (dpt->resource_shortage != 0
257 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
258 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
259 dpt->resource_shortage = FALSE;
261 dccb->state = DCCB_FREE;
262 SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links);
/*
 * dptccbvtop: translate a DCCB virtual address to its bus address,
 * using the fixed offset between the contiguous DCCB array's virtual
 * base and its bus base.
 */
266 static __inline bus_addr_t
267 dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb)
269 return (dpt->dpt_ccb_busbase
270 + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs));
/*
 * dptccbptov: inverse of dptccbvtop — translate a DCCB bus address
 * back to its virtual address.  The subtraction is done in units of
 * struct dpt_ccb via pointer arithmetic on the two base addresses.
 */
273 static __inline struct dpt_ccb *
274 dptccbptov(struct dpt_softc *dpt, bus_addr_t busaddr)
276 return (dpt->dpt_dccbs
277 + ((struct dpt_ccb *)busaddr
278 - (struct dpt_ccb *)dpt->dpt_ccb_busbase));
282 * Send a command for immediate execution by the DPT
283 * See above function for IMPORTANT notes.
/*
 * dpt_send_immediate: thin wrapper around dpt_send_eata_command() that
 * fixes the command to EATA_CMD_IMMEDIATE and forwards the remaining
 * arguments (the trailing code2 argument is in an elided line).
 */
286 dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
287 u_int32_t cmd_busaddr, u_int retries,
288 u_int ifc, u_int code, u_int code2)
290 return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr,
291 EATA_CMD_IMMEDIATE, retries, ifc,
296 /* ===================== Private Function definitions =======================*/
/*
 * dptmapmem: bus_dmamap_load() callback.  Stores the bus address of the
 * first (and only expected) DMA segment into the bus_addr_t pointed to
 * by arg.  nseg and error are not examined in the visible span.
 */
298 dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
300 bus_addr_t *busaddrp;
302 busaddrp = (bus_addr_t *)arg;
303 *busaddrp = segs->ds_addr;
/*
 * dptallocsgmap: allocate one page of DMA-able scatter/gather space,
 * load it to obtain its bus address (stored in sg_physaddr via the
 * dptmapmem callback), and link the tracking node onto dpt->sg_maps.
 * Returns NULL on allocation failure (the NULL-return lines are
 * elided); otherwise returns the new node.
 */
306 static struct sg_map_node *
307 dptallocsgmap(struct dpt_softc *dpt)
309 struct sg_map_node *sg_map;
311 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
316 /* Allocate S/G space for the next batch of CCBS */
317 if (bus_dmamem_alloc(dpt->sg_dmat, (void **)&sg_map->sg_vaddr,
318 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
319 free(sg_map, M_DEVBUF);
323 (void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
324 PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr,
327 SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links);
333 * Allocate another chunk of CCB's. Return count of entries added.
/*
 * dptallocccbs: carve another batch of DCCBs out of the pre-allocated
 * contiguous DCCB array, pairing each with S/G space from a (possibly
 * fresh) one-page sg_map.  For each new DCCB: create a DMA map for data
 * buffers, initialize its timeout callout, and pre-fill the embedded
 * EATA CCB's DMA pointers (data = its S/G list, status = the shared
 * status packet, request-sense = its own sense_data field).  All
 * device-visible addresses are stored big-endian via htonl().
 * Caller must hold the softc lock.
 */
336 dptallocccbs(dpt_softc_t *dpt)
338 struct dpt_ccb *next_ccb;
339 struct sg_map_node *sg_map;
346 mtx_assert(&dpt->lock, MA_OWNED);
347 next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs];
349 if (next_ccb == dpt->dpt_dccbs) {
351 * First time through. Re-use the S/G
352 * space we allocated for initialization
355 sg_map = SLIST_FIRST(&dpt->sg_maps);
357 sg_map = dptallocsgmap(dpt);
363 segs = sg_map->sg_vaddr;
364 physaddr = sg_map->sg_physaddr;
/* One page of S/G space holds this many per-CCB S/G lists. */
366 newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t)));
367 for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) {
370 error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0,
374 callout_init_mtx(&next_ccb->timer, &dpt->lock, 0);
375 next_ccb->sg_list = segs;
376 next_ccb->sg_busaddr = htonl(physaddr);
377 next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr);
378 next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
379 next_ccb->eata_ccb.cp_reqDMA =
380 htonl(dptccbvtop(dpt, next_ccb)
381 + offsetof(struct dpt_ccb, sense_data));
382 next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend;
383 next_ccb->state = DCCB_FREE;
384 next_ccb->tag = dpt->total_dccbs;
385 SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links);
/* Advance to the next S/G slot and the next DCCB's bus address. */
387 physaddr += (dpt->sgsize * sizeof(dpt_sg_t));
388 dpt->dpt_ccb_busend += sizeof(*next_ccb);
/*
 * dpt_pio_get_conf: fetch the EATA configuration block from a candidate
 * controller using programmed I/O only (used at probe time, before any
 * DMA setup exists).  Resets the controller, waits for ready, issues
 * EATA_CMD_PIO_READ_CONFIG, reads the config struct one 16-bit word at
 * a time, and validates the EATA signature (either byte order).
 * Returns the config (a static allocation — NOT re-entrant) or NULL.
 */
397 dpt_pio_get_conf (u_int32_t base)
/* Static: the same buffer is returned to every caller. */
399 static dpt_conf_t * conf;
404 * Allocate a dpt_conf_t
407 conf = (dpt_conf_t *)malloc(sizeof(dpt_conf_t),
408 M_DEVBUF, M_NOWAIT | M_ZERO);
412 * If we didn't get one then we probably won't ever get one.
415 printf("dpt: unable to allocate dpt_conf_t\n");
420 * Reset the controller.
422 outb((base + HA_WCOMMAND), EATA_CMD_RESET);
425 * Wait for the controller to become ready.
426 * For some reason there can be -no- delays after calling reset
427 * before we wait on ready status.
429 if (dpt_pio_wait(base, HA_RSTATUS, HA_SBUSY, 0)) {
430 printf("dpt: timeout waiting for controller to become ready\n");
434 if (dpt_pio_wait(base, HA_RAUXSTAT, HA_ABUSY, 0)) {
/* NOTE(review): "timetout" is a typo in this runtime message. */
435 printf("dpt: timetout waiting for adapter ready.\n");
440 * Send the PIO_READ_CONFIG command.
442 outb((base + HA_WCOMMAND), EATA_CMD_PIO_READ_CONFIG);
445 * Read the data into the struct.
447 p = (u_int16_t *)conf;
448 for (i = 0; i < (sizeof(dpt_conf_t) / 2); i++) {
/* Wait for each data word to become available before reading it. */
450 if (dpt_pio_wait(base, HA_RSTATUS, HA_SDRQ, 0)) {
452 printf("dpt: timeout in data read.\n");
456 (*p) = inw(base + HA_RDATA);
460 if (inb(base + HA_RSTATUS) & HA_SERROR) {
462 printf("dpt: error reading configuration data.\n");
466 #define BE_EATA_SIGNATURE 0x45415441
467 #define LE_EATA_SIGNATURE 0x41544145
470 * Test to see if we have a valid card.
472 if ((conf->signature == BE_EATA_SIGNATURE) ||
473 (conf->signature == LE_EATA_SIGNATURE)) {
/* Drain any residual data words left in the controller's FIFO. */
475 while (inb(base + HA_RSTATUS) & HA_SDRQ) {
476 inw(base + HA_RDATA);
486 * Read a configuration page into the supplied dpt_conf_t buffer.
/*
 * dpt_get_conf: issue an EATA "interpret-mode" INQUIRY to fetch a
 * configuration page into the DMA buffer at dccb_busaddr.  Waits for
 * the controller to settle, sends EATA_CMD_DMA_SEND_CP, polls up to
 * two seconds for completion, then validates the status packet.
 * Returns 0 on success, non-zero on failure (return lines elided).
 * Caller must hold the softc lock.
 */
489 dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
490 u_int size, u_int page, u_int target, int extent)
499 mtx_assert(&dpt->lock, MA_OWNED);
500 cp = &dccb->eata_ccb;
/* Clear the shared status packet before reuse. */
501 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp));
505 cp->Auto_Req_Sen = 1;
506 cp->reqlen = sizeof(struct scsi_sense_data);
509 cp->cp_LUN = 0; /* In the EATA packet */
510 cp->cp_lun = 0; /* In the SCSI command */
512 cp->cp_scsi_cmd = INQUIRY;
515 cp->cp_extent = extent;
518 cp->cp_channel = 0; /* DNC, Interpret mode is set */
520 cp->cp_datalen = htonl(size);
523 * This could be a simple for loop, but we suspected the compiler To
524 * have optimized it a bit too much. Wait for the controller to
/* Spin until the status register shows one of the acceptable ready
 * patterns and the busy bit clears. */
527 while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC)
528 && (status != (HA_SREADY | HA_SSC | HA_SERROR))
529 && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ)))
530 || (dpt_wait(dpt, HA_SBUSY, 0))) {
533 * RAID Drives still Spinning up? (This should only occur if
534 * the DPT controller is in a NON PC (PCI?) platform).
536 if (dpt_raid_busy(dpt)) {
537 device_printf(dpt->dev,
538 "WARNING: Get_conf() RSUS failed.\n");
543 DptStat_Reset_BUSY(dpt->sp);
546 * XXXX We might want to do something more clever than aborting at
547 * this point, like resetting (rebooting) the controller and trying
550 if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
551 EATA_CMD_DMA_SEND_CP,
552 10000, 0, 0, 0)) != 0) {
553 device_printf(dpt->dev,
554 "WARNING: Get_conf() failed (%d) to send "
555 "EATA_CMD_DMA_READ_CONFIG\n",
559 /* Wait for two seconds for a response. This can be slow */
562 && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
567 /* Grab the status and clear interrupts */
568 status = dpt_inb(dpt, HA_RSTATUS);
571 * Check the status carefully. Return only if the
572 * command was successful.
574 if (((status & HA_SERROR) == 0)
575 && (dpt->sp->hba_stat == 0)
576 && (dpt->sp->scsi_stat == 0)
577 && (dpt->sp->residue_len == 0))
580 if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND)
586 /* Detect Cache parameters and size */
/*
 * dpt_detect_cache: issue a SCSI LOG SENSE (0x4d, page 0x33) through the
 * EATA interface and walk the returned DPT hardware-capabilities log
 * page to classify the controller's cache (write-back / write-through /
 * none) and record its size.  Defaults to write-back when the page is
 * absent or malformed.  Caller must hold the softc lock.
 */
588 dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
598 mtx_assert(&dpt->lock, MA_OWNED);
601 * Default setting, for best performance..
602 * This is what virtually all cards default to..
604 dpt->cache_type = DPT_CACHE_WRITEBACK;
607 cp = &dccb->eata_ccb;
/* NOTE(review): sizeof(dpt->sp) is the size of the POINTER, so only a
 * few bytes of the status packet are zeroed here.  dpt_get_conf() uses
 * sizeof(*dpt->sp) for the same operation — this looks like a bug. */
608 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(dpt->sp));
611 /* Setup the command structure */
614 cp->Auto_Req_Sen = 1;
615 cp->reqlen = sizeof(struct scsi_sense_data);
617 cp->cp_id = 0; /* who cares? The HBA will interpret.. */
618 cp->cp_LUN = 0; /* In the EATA packet */
619 cp->cp_lun = 0; /* In the SCSI command */
622 cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP;
631 * Build the EATA Command Packet structure
632 * for a Log Sense Command.
634 cp->cp_cdb[0] = 0x4d;
636 cp->cp_cdb[2] = 0x40 | 0x33;
639 cp->cp_datalen = htonl(512);
641 result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
642 EATA_CMD_DMA_SEND_CP,
645 device_printf(dpt->dev,
646 "WARNING: detect_cache() failed (%d) to send "
647 "EATA_CMD_DMA_SEND_CP\n", result);
650 /* Wait for two seconds for a response. This can be slow... */
653 !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
658 /* Grab the status and clear interrupts */
659 status = dpt_inb(dpt, HA_RSTATUS);
/* Verify we actually got log page 0x33 back. */
664 if (buff[0] != 0x33) {
667 bytes = DPT_HCP_LENGTH(buff);
668 param = DPT_HCP_FIRST(buff);
670 if (DPT_HCP_CODE(param) != 1) {
672 * DPT Log Page layout error
674 device_printf(dpt->dev, "NOTICE: Log Page (1) layout error\n");
/* Parameter 1, bit 2: cache present at all? */
677 if (!(param[4] & 0x4)) {
678 dpt->cache_type = DPT_NO_CACHE;
/* Walk the parameter list looking for parameter code 6 (cache info),
 * bailing out if we run off the end of the page. */
681 while (DPT_HCP_CODE(param) != 6) {
682 param = DPT_HCP_NEXT(param);
684 || (param >= &buff[bytes])) {
689 if (param[4] & 0x2) {
693 dpt->cache_type = DPT_NO_CACHE;
697 if (param[4] & 0x4) {
698 dpt->cache_type = DPT_CACHE_WRITETHROUGH;
701 /* XXX This isn't correct. This log parameter only has two bytes.... */
703 dpt->cache_size = param[5]
/*
 * dpt_poll: CAM SIM poll entry point — run the interrupt handler
 * synchronously (lock already held by CAM's polling machinery).
 */
711 dpt_poll(struct cam_sim *sim)
713 dpt_intr_locked(cam_sim_softc(sim));
/*
 * dptexecuteccb: bus_dmamap_load callback that finishes CCB submission.
 * On mapping error the CCB is failed (freezing the devq if still in
 * progress) and the DCCB freed.  Otherwise the DMA segments are copied
 * into the DCCB's S/G list (big-endian), the EATA packet's data
 * pointers set up (single-segment commands bypass scatter mode), the
 * buffer synced, the CCB queued with a timeout callout, and the command
 * handed to the controller.  Caller must hold the softc lock.
 */
717 dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
719 struct dpt_ccb *dccb;
721 struct dpt_softc *dpt;
723 dccb = (struct dpt_ccb *)arg;
725 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
727 mtx_assert(&dpt->lock, MA_OWNED);
/* NOTE(review): "Unexepected" is a typo in this runtime message. */
731 device_printf(dpt->dev,
732 "Unexepected error 0x%x returned from "
733 "bus_dmamap_load\n", error);
734 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
735 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
736 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
738 dptfreeccb(dpt, dccb);
745 bus_dma_segment_t *end_seg;
748 end_seg = dm_segs + nseg;
750 /* Copy the segments into our SG list */
752 while (dm_segs < end_seg) {
753 sg->seg_len = htonl(dm_segs->ds_len);
754 sg->seg_addr = htonl(dm_segs->ds_addr);
/* Multi-segment transfer: point the EATA packet at the S/G list. */
760 dccb->eata_ccb.scatter = 1;
761 dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr;
762 dccb->eata_ccb.cp_datalen =
763 htonl(nseg * sizeof(dpt_sg_t));
/* Single segment: address the data directly (values already BE). */
765 dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr;
766 dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len;
769 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
770 op = BUS_DMASYNC_PREREAD;
772 op = BUS_DMASYNC_PREWRITE;
774 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
/* No data phase for this command. */
777 dccb->eata_ccb.cp_dataDMA = 0;
778 dccb->eata_ccb.cp_datalen = 0;
782 * Last time we need to check if this CCB needs to
785 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
787 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
788 dptfreeccb(dpt, dccb);
793 dccb->state |= DCCB_ACTIVE;
794 ccb->ccb_h.status |= CAM_SIM_QUEUED;
795 LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le);
796 callout_reset(&dccb->timer, (ccb->ccb_h.timeout * hz) / 1000,
798 if (dpt_send_eata_command(dpt, &dccb->eata_ccb,
799 dccb->eata_ccb.cp_busaddr,
800 EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) {
801 ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */
803 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
804 dptfreeccb(dpt, dccb);
/*
 * dpt_action: CAM SIM action entry point.  Dispatches on the CCB
 * function code: XPT_SCSI_IO builds an EATA command packet from the
 * CAM CCB and maps its data for DMA; the transfer-settings, geometry,
 * bus-reset, and path-inquiry cases are handled inline; everything
 * else is rejected.  Caller (CAM) holds the softc lock.
 */
810 dpt_action(struct cam_sim *sim, union ccb *ccb)
812 struct dpt_softc *dpt;
814 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n"));
816 dpt = (struct dpt_softc *)cam_sim_softc(sim);
817 mtx_assert(&dpt->lock, MA_OWNED);
/* Refuse new work once a shutdown has begun. */
819 if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) {
820 xpt_print_path(ccb->ccb_h.path);
821 printf("controller is shutdown. Aborting CCB.\n");
822 ccb->ccb_h.status = CAM_NO_HBA;
827 switch (ccb->ccb_h.func_code) {
828 /* Common cases first */
829 case XPT_SCSI_IO: /* Execute the requested I/O operation */
831 struct ccb_scsiio *csio;
832 struct ccb_hdr *ccbh;
833 struct dpt_ccb *dccb;
834 struct eata_ccb *eccb;
838 /* Max CDB length is 12 bytes */
839 if (csio->cdb_len > 12) {
840 ccb->ccb_h.status = CAM_REQ_INVALID;
/* No DCCB available: record the shortage and ask CAM to requeue. */
844 if ((dccb = dptgetccb(dpt)) == NULL) {
845 dpt->resource_shortage = 1;
846 xpt_freeze_simq(sim, /*count*/1);
847 ccb->ccb_h.status = CAM_REQUEUE_REQ;
851 eccb = &dccb->eata_ccb;
853 /* Link dccb and ccb so we can find one from the other */
855 ccb->ccb_h.ccb_dccb_ptr = dccb;
856 ccb->ccb_h.ccb_dpt_ptr = dpt;
859 * Explicitly set all flags so that the compiler can
860 * be smart about setting them.
862 eccb->SCSI_Reset = 0;
864 eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE)
869 ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)]
871 eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0;
872 eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0;
873 eccb->reqlen = csio->sense_len;
874 eccb->cp_id = ccb->ccb_h.target_id;
875 eccb->cp_channel = cam_sim_bus(sim);
876 eccb->cp_LUN = ccb->ccb_h.target_lun;
878 eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
880 eccb->cp_identify = 1;
/* Pass the CAM tag action and our DCCB index as the tag message. */
882 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
883 && csio->tag_action != CAM_TAG_ACTION_NONE) {
884 eccb->cp_msg[0] = csio->tag_action;
885 eccb->cp_msg[1] = dccb->tag;
892 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
893 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
894 bcopy(csio->cdb_io.cdb_ptr,
895 eccb->cp_cdb, csio->cdb_len);
897 /* I guess I could map it in... */
898 ccb->ccb_h.status = CAM_REQ_INVALID;
899 dptfreeccb(dpt, dccb);
904 bcopy(csio->cdb_io.cdb_bytes,
905 eccb->cp_cdb, csio->cdb_len);
908 * If we have any data to send with this command,
909 * map it into bus space.
911 /* Only use S/G if there is a transfer */
912 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
915 error = bus_dmamap_load_ccb(dpt->buffer_dmat,
920 if (error == EINPROGRESS) {
922 * So as to maintain ordering,
923 * freeze the controller queue
924 * until our mapping is
927 xpt_freeze_simq(sim, 1);
/* NOTE(review): this ORs the CAM-namespace flag CAM_RELEASE_SIMQ into
 * dccb->state, whose other values are DCCB_* flags (dptfreeccb tests
 * DCCB_RELEASE_SIMQ).  Looks like DCCB_RELEASE_SIMQ was intended —
 * verify the two constants' values before relying on this. */
928 dccb->state |= CAM_RELEASE_SIMQ;
933 * Does it want them both on or both off?
934 * CAM_DIR_NONE is both on, so this code can
935 * be removed if this is also what the DPT
940 dptexecuteccb(dccb, NULL, 0, 0);
944 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
945 case XPT_ABORT: /* Abort the specified CCB */
947 ccb->ccb_h.status = CAM_REQ_INVALID;
950 case XPT_SET_TRAN_SETTINGS:
952 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
956 case XPT_GET_TRAN_SETTINGS:
957 /* Get default/user set transfer settings for the target */
959 struct ccb_trans_settings *cts = &ccb->cts;
960 struct ccb_trans_settings_scsi *scsi =
961 &cts->proto_specific.scsi;
962 struct ccb_trans_settings_spi *spi =
963 &cts->xport_specific.spi;
965 cts->protocol = PROTO_SCSI;
966 cts->protocol_version = SCSI_REV_2;
967 cts->transport = XPORT_SPI;
968 cts->transport_version = 2;
970 if (cts->type == CTS_TYPE_USER_SETTINGS) {
971 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
/* NOTE(review): this ternary reports 8-bit for wide controllers and
 * 16-bit for narrow ones, which is the opposite of the PI_WIDE_16
 * logic in XPT_PATH_INQ below — looks inverted; confirm. */
972 spi->bus_width = (dpt->max_id > 7)
973 ? MSG_EXT_WDTR_BUS_8_BIT
974 : MSG_EXT_WDTR_BUS_16_BIT;
975 spi->sync_period = 25; /* 10MHz */
976 if (spi->sync_period != 0)
977 spi->sync_offset = 15;
978 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
/* NOTE(review): CTS_SPI_VALID_SYNC_RATE appears twice in this mask
 * (harmless, but one was probably meant to be something else). */
980 spi->valid = CTS_SPI_VALID_SYNC_RATE
981 | CTS_SPI_VALID_SYNC_OFFSET
982 | CTS_SPI_VALID_SYNC_RATE
983 | CTS_SPI_VALID_BUS_WIDTH
984 | CTS_SPI_VALID_DISC;
985 scsi->valid = CTS_SCSI_VALID_TQ;
986 ccb->ccb_h.status = CAM_REQ_CMP;
988 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
993 case XPT_CALC_GEOMETRY:
996 * XXX Use Adaptec translation until I find out how to
997 * get this information from the card.
999 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1003 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1006 ccb->ccb_h.status = CAM_REQ_CMP;
1010 case XPT_TERM_IO: /* Terminate the I/O process */
1012 ccb->ccb_h.status = CAM_REQ_INVALID;
1015 case XPT_PATH_INQ: /* Path routing inquiry */
1017 struct ccb_pathinq *cpi = &ccb->cpi;
1019 cpi->version_num = 1;
1020 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1021 if (dpt->max_id > 7)
1022 cpi->hba_inquiry |= PI_WIDE_16;
1023 cpi->target_sprt = 0;
1025 cpi->hba_eng_cnt = 0;
1026 cpi->max_target = dpt->max_id;
1027 cpi->max_lun = dpt->max_lun;
1028 cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)];
1029 cpi->bus_id = cam_sim_bus(sim);
1030 cpi->base_transfer_speed = 3300;
1031 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1032 strncpy(cpi->hba_vid, "DPT", HBA_IDLEN);
1033 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1034 cpi->unit_number = cam_sim_unit(sim);
1035 cpi->transport = XPORT_SPI;
1036 cpi->transport_version = 2;
1037 cpi->protocol = PROTO_SCSI;
1038 cpi->protocol_version = SCSI_REV_2;
1039 cpi->ccb_h.status = CAM_REQ_CMP;
1044 ccb->ccb_h.status = CAM_REQ_INVALID;
1051 * This routine will try to send an EATA command to the DPT HBA.
1052 * It will, by default, try 20,000 times, waiting 50us between tries.
1053 * It returns 0 on success and 1 on failure.
/*
 * dpt_send_eata_command: poll the auxiliary status register until the
 * controller is not busy (up to 'retries' iterations), optionally
 * record min/max polling statistics, write the (byte-swapped on BE
 * hosts) command-packet bus address to the DMA-address register, then
 * issue the command.  For EATA_CMD_IMMEDIATE without a packet, the
 * code/code2/ifc registers are written instead of an address.
 */
1056 dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
1057 u_int32_t cmd_busaddr, u_int command, u_int retries,
1058 u_int ifc, u_int code, u_int code2)
1066 * I hate this polling nonsense. Wish there was a way to tell the DPT
1067 * to go get commands at its own pace, or to interrupt when ready.
1068 * In the mean time we will measure how many iterations it really
1071 for (loop = 0; loop < retries; loop++) {
1072 if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0)
/* loop < retries means the busy bit cleared before we gave up. */
1078 if (loop < retries) {
1079 #ifdef DPT_MEASURE_PERFORMANCE
1080 if (loop > dpt->performance.max_eata_tries)
1081 dpt->performance.max_eata_tries = loop;
1083 if (loop < dpt->performance.min_eata_tries)
1084 dpt->performance.min_eata_tries = loop;
1087 #ifdef DPT_MEASURE_PERFORMANCE
1088 ++dpt->performance.command_too_busy;
1093 /* The controller is alive, advance the wedge timer */
1094 #ifdef DPT_RESET_HBA
1095 dpt->last_contact = microtime_now;
1098 if (cmd_block == NULL)
1100 #if (BYTE_ORDER == BIG_ENDIAN)
/* NOTE(review): as written, every shifted value is masked with 0xFF
 * and ORed into the low byte, so this does NOT perform a 32-bit byte
 * swap — a correct swap needs masks of 0xFF00/0xFF0000/0xFF000000 (or
 * left shifts).  Confirm against the upstream source. */
1102 cmd_busaddr = ((cmd_busaddr >> 24) & 0xFF)
1103 | ((cmd_busaddr >> 16) & 0xFF)
1104 | ((cmd_busaddr >> 8) & 0xFF)
1105 | (cmd_busaddr & 0xFF);
1108 /* And now the address */
1109 dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr);
1111 if (command == EATA_CMD_IMMEDIATE) {
1112 if (cmd_block == NULL) {
1113 dpt_outb(dpt, HA_WCODE2, code2);
1114 dpt_outb(dpt, HA_WCODE, code);
1116 dpt_outb(dpt, HA_WIFC, ifc);
1118 dpt_outb(dpt, HA_WCOMMAND, command);
1124 /* ==================== Exported Function definitions =======================*/
/*
 * dpt_alloc: first-stage softc initialization — create the softc lock,
 * initialize the free-DCCB and pending-CCB lists, mark every channel's
 * reset level OK, and reset the optional performance counters.
 */
1126 dpt_alloc(device_t dev)
1128 dpt_softc_t *dpt = device_get_softc(dev);
1131 mtx_init(&dpt->lock, "dpt", NULL, MTX_DEF);
1132 SLIST_INIT(&dpt->free_dccb_list);
1133 LIST_INIT(&dpt->pending_ccb_list);
1134 for (i = 0; i < MAX_CHANNELS; i++)
1135 dpt->resetlevel[i] = DPT_HA_OK;
1137 #ifdef DPT_MEASURE_PERFORMANCE
1138 dpt_reset_performance(dpt);
1139 #endif /* DPT_MEASURE_PERFORMANCE */
/*
 * dpt_free: tear down everything dpt_init() built, in reverse order.
 * The switch on init_level (case labels are in elided lines) falls
 * through so that a partially-initialized softc only releases what it
 * actually acquired: DCCB map/memory/tag, buffer tag, all S/G map
 * pages, the S/G tag, and finally the softc lock.
 */
1144 dpt_free(struct dpt_softc *dpt)
1146 switch (dpt->init_level) {
1149 bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap);
1151 bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs,
1153 bus_dmamap_destroy(dpt->dccb_dmat, dpt->dccb_dmamap);
1155 bus_dma_tag_destroy(dpt->dccb_dmat);
1157 bus_dma_tag_destroy(dpt->buffer_dmat);
1160 struct sg_map_node *sg_map;
1162 while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) {
1163 SLIST_REMOVE_HEAD(&dpt->sg_maps, links);
1164 bus_dmamap_unload(dpt->sg_dmat,
1166 bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr,
1168 free(sg_map, M_DEVBUF);
1170 bus_dma_tag_destroy(dpt->sg_dmat);
1175 mtx_destroy(&dpt->lock);
/*
 * dpt_alloc_resources: claim the bus resources the probe identified —
 * the I/O window (type/rid stored in the softc by the bus front-end)
 * and the IRQ line.  Error returns are in elided lines.
 */
1179 dpt_alloc_resources (device_t dev)
1184 dpt = device_get_softc(dev);
1186 dpt->io_res = bus_alloc_resource_any(dev, dpt->io_type, &dpt->io_rid,
1188 if (dpt->io_res == NULL) {
1189 device_printf(dev, "No I/O space?!\n");
1194 dpt->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dpt->irq_rid,
1196 if (dpt->irq_res == NULL) {
1197 device_printf(dev, "No IRQ!\n");
/*
 * dpt_release_resources: undo dpt_alloc_resources — tear down the
 * interrupt hook and release the I/O, IRQ, and (ISA) DRQ resources.
 * The NULL guards around each release are in elided lines.
 */
1209 dpt_release_resources (device_t dev)
1211 struct dpt_softc * dpt;
1213 dpt = device_get_softc(dev);
1216 bus_teardown_intr(dev, dpt->irq_res, dpt->ih);
1218 bus_release_resource(dev, dpt->io_type, dpt->io_rid, dpt->io_res);
1220 bus_release_resource(dev, SYS_RES_IRQ, dpt->irq_rid, dpt->irq_res);
1222 bus_release_resource(dev, SYS_RES_DRQ, dpt->drq_rid, dpt->drq_res);
/*
 * Widths of the consecutive fixed-size inquiry strings in dpt_inq_t,
 * in the order they appear starting at board_data.vendor.  dpt_init()
 * walks this table to trim trailing spaces from each field in place.
 */
1227 static u_int8_t string_sizes[] =
1229 sizeof(((dpt_inq_t*)NULL)->vendor),
1230 sizeof(((dpt_inq_t*)NULL)->modelNum),
1231 sizeof(((dpt_inq_t*)NULL)->firmware),
1232 sizeof(((dpt_inq_t*)NULL)->protocol),
/*
 * dpt_init: main controller bring-up.  Creates the S/G DMA tag, uses a
 * bootstrap S/G page as a temporary status packet + CCB + data area to
 * fetch the board configuration, inquiry data, and cache parameters,
 * then sizes the driver from the config (max targets/LUNs, queue
 * depth, S/G limits), creates the buffer and DCCB DMA tags, allocates
 * and maps the permanent DCCB array + status packet, allocates the
 * first batch of DCCBs, trims the inquiry strings, and announces the
 * controller.  Error-exit lines are elided throughout.
 */
1236 dpt_init(struct dpt_softc *dpt)
1239 struct sg_map_node *sg_map;
1246 dpt->init_level = 0;
1247 SLIST_INIT(&dpt->sg_maps);
1248 mtx_lock(&dpt->lock);
1250 #ifdef DPT_RESET_BOARD
1251 device_printf(dpt->dev, "resetting HBA\n");
1252 dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
1254 /* XXX Shouldn't we poll a status register or something??? */
1256 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1257 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1260 /* lowaddr */ BUS_SPACE_MAXADDR,
1261 /* highaddr */ BUS_SPACE_MAXADDR,
1263 /* filterarg */ NULL,
1264 /* maxsize */ PAGE_SIZE,
1266 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1268 /* lockfunc */ NULL,
1270 &dpt->sg_dmat) != 0) {
1277 * We allocate our DPT ccbs as a contiguous array of bus dma'able
1278 * memory. To get the allocation size, we need to know how many
1279 * ccbs the card supports. This requires a ccb. We solve this
1280 * chicken and egg problem by allocating some re-usable S/G space
1281 * up front, and treating it as our status packet, CCB, and target
1282 * memory space for these commands.
1284 sg_map = dptallocsgmap(dpt);
/* Bootstrap layout within the page: [status packet][dccb][data...]. */
1288 dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
1289 dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
1290 bzero(dccb, sizeof(*dccb));
1291 dpt->sp_physaddr = sg_map->sg_physaddr;
1292 dccb->eata_ccb.cp_dataDMA =
1293 htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
1294 dccb->eata_ccb.cp_busaddr = ~0;
1295 dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
1296 dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
1297 + offsetof(struct dpt_ccb, sense_data));
1299 /* Okay. Fetch our config */
1300 bzero(&dccb[1], sizeof(conf)); /* data area */
1301 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1302 sizeof(conf), 0xc1, 7, 1);
1305 device_printf(dpt->dev, "Failed to get board configuration\n");
1308 bcopy(&dccb[1], &conf, sizeof(conf));
/* Second query: the board's inquiry page, addressed to its SCSI ID. */
1310 bzero(&dccb[1], sizeof(dpt->board_data));
1311 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1312 sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
1314 device_printf(dpt->dev, "Failed to get inquiry information\n");
1317 bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));
1319 dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1320 (u_int8_t *)&dccb[1]);
/* Classify the EATA revision from the command-packet length. */
1322 switch (ntohl(conf.splen)) {
1324 dpt->EATA_revision = 'a';
1327 dpt->EATA_revision = 'b';
1330 dpt->EATA_revision = 'c';
1333 dpt->EATA_revision = 'z';
1336 dpt->EATA_revision = '?';
1339 dpt->max_id = conf.MAX_ID;
1340 dpt->max_lun = conf.MAX_LUN;
1341 dpt->irq = conf.IRQ;
1342 dpt->dma_channel = (8 - conf.DMA_channel) & 7;
1343 dpt->channels = conf.MAX_CHAN + 1;
1344 dpt->state |= DPT_HA_OK;
1346 dpt->primary = FALSE;
1348 dpt->primary = TRUE;
1350 dpt->more_support = conf.MORE_support;
/* Immediate commands require firmware 07G0 or newer. */
1352 if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
1353 dpt->immediate_support = 1;
1355 dpt->immediate_support = 0;
1357 dpt->broken_INQUIRY = FALSE;
1359 dpt->cplen = ntohl(conf.cplen);
1360 dpt->cppadlen = ntohs(conf.cppadlen);
1361 dpt->max_dccbs = ntohs(conf.queuesiz);
/* The tag field is one byte, so the queue is capped at 256 entries. */
1363 if (dpt->max_dccbs > 256) {
1364 device_printf(dpt->dev, "Max CCBs reduced from %d to "
1365 "256 due to tag algorithm\n", dpt->max_dccbs);
1366 dpt->max_dccbs = 256;
1369 dpt->hostid[0] = conf.scsi_id0;
1370 dpt->hostid[1] = conf.scsi_id1;
1371 dpt->hostid[2] = conf.scsi_id2;
1376 dpt->sgsize = ntohs(conf.SGsiz);
1378 /* We can only get 64k buffers, so don't bother to waste space. */
1379 if (dpt->sgsize < 17 || dpt->sgsize > 32)
1382 if (dpt->sgsize > dpt_max_segs)
1383 dpt->sgsize = dpt_max_segs;
1385 /* DMA tag for mapping buffers into device visible space. */
1386 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1389 /* lowaddr */ BUS_SPACE_MAXADDR,
1390 /* highaddr */ BUS_SPACE_MAXADDR,
1392 /* filterarg */ NULL,
1393 /* maxsize */ MAXBSIZE,
1394 /* nsegments */ dpt->sgsize,
1395 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1396 /* flags */ BUS_DMA_ALLOCNOW,
1397 /* lockfunc */ busdma_lock_mutex,
1398 /* lockarg */ &dpt->lock,
1399 &dpt->buffer_dmat) != 0) {
1400 device_printf(dpt->dev,
1401 "bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
1407 /* DMA tag for our ccb structures and interrupt status packet */
1408 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1411 /* lowaddr */ BUS_SPACE_MAXADDR,
1412 /* highaddr */ BUS_SPACE_MAXADDR,
1414 /* filterarg */ NULL,
1415 /* maxsize */ (dpt->max_dccbs *
1416 sizeof(struct dpt_ccb)) +
1419 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1421 /* lockfunc */ NULL,
1423 &dpt->dccb_dmat) != 0) {
1424 device_printf(dpt->dev,
1425 "bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
1431 /* Allocation for our ccbs and interrupt status packet */
1432 if (bus_dmamem_alloc(dpt->dccb_dmat, (void **)&dpt->dpt_dccbs,
1433 BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
1434 device_printf(dpt->dev,
1435 "bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
1441 /* And permanently map them */
1442 bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
1444 (dpt->max_dccbs * sizeof(struct dpt_ccb))
1446 dptmapmem, &dpt->dpt_ccb_busbase, /*flags*/0);
1448 /* Clear them out. */
1449 bzero(dpt->dpt_dccbs,
1450 (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));
1452 dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;
/* The permanent status packet lives just past the DCCB array. */
1454 dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
1455 dpt->sp_physaddr = dpt->dpt_ccb_busbase
1456 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
1459 /* Allocate our first batch of ccbs */
1460 if (dptallocccbs(dpt) == 0) {
1461 device_printf(dpt->dev, "dptallocccbs(dpt) == 0\n");
1462 mtx_unlock(&dpt->lock);
1466 /* Prepare for Target Mode */
1467 dpt->target_mode_enabled = 1;
1469 /* Nuke excess spaces from inquiry information */
1470 strp = dpt->board_data.vendor;
1471 for (i = 0; i < sizeof(string_sizes); i++) {
1472 index = string_sizes[i] - 1;
1473 while (index && (strp[index] == ' '))
1474 strp[index--] = '\0';
1475 strp += string_sizes[i];
1478 device_printf(dpt->dev, "%.8s %.16s FW Rev. %.4s, ",
1479 dpt->board_data.vendor,
1480 dpt->board_data.modelNum, dpt->board_data.firmware);
1482 printf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");
1484 if (dpt->cache_type != DPT_NO_CACHE
1485 && dpt->cache_size != 0) {
1486 printf("%s Cache, ",
1487 dpt->cache_type == DPT_CACHE_WRITETHROUGH
1488 ? "Write-Through" : "Write-Back");
1491 printf("%d CCBs\n", dpt->max_dccbs);
1492 mtx_unlock(&dpt->lock);
1496 mtx_unlock(&dpt->lock);
/*
 * dpt_attach() - register the controller's SCSI channels with CAM.
 * Allocates one shared SIM device queue sized to the number of CCBs,
 * then, per channel: allocates a SIM, registers a CAM bus for it, and
 * creates a wildcard (all targets / all LUNs) path.  On any per-channel
 * failure the partially constructed objects for that channel are torn
 * down.  Finally hooks dptshutdown() into the shutdown_final event so
 * the HBA cache is flushed at system shutdown.
 * NOTE(review): several original source lines are elided in this view
 * (return statements, loop/brace closers); comments describe only the
 * visible code.
 */
1501 dpt_attach(dpt_softc_t *dpt)
1503 	struct cam_devq *devq;
1507 	 * Create the device queue for our SIM.
1509 	devq = cam_simq_alloc(dpt->max_dccbs);
1513 	mtx_lock(&dpt->lock);
	/* One SIM + one CAM bus per physical channel on this HBA. */
1514 	for (i = 0; i < dpt->channels; i++) {
1516 		 * Construct our SIM entry
1518 		dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt",
1519 				dpt, device_get_unit(dpt->dev), &dpt->lock,
1521 				/*tagged*/dpt->max_dccbs, devq);
1522 		if (dpt->sims[i] == NULL) {
			/* Only the first channel owns devq; free it directly here. */
1524 				cam_simq_free(devq);
1526 				printf( "%s(): Unable to attach bus %d "
1527 				       "due to resource shortage\n",
1532 		if (xpt_bus_register(dpt->sims[i], dpt->dev, i) != CAM_SUCCESS){
1533 			cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1534 			dpt->sims[i] = NULL;
		/* Wildcard path covering every target/LUN on this bus. */
1538 		if (xpt_create_path(&dpt->paths[i], /*periph*/NULL,
1539 				    cam_sim_path(dpt->sims[i]),
1540 				    CAM_TARGET_WILDCARD,
1541 				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1542 			xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
1543 			cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1544 			dpt->sims[i] = NULL;
1549 	mtx_unlock(&dpt->lock);
	/* Flush the HBA cache at shutdown (see dptshutdown below). */
1551 	EVENTHANDLER_REGISTER(shutdown_final, dptshutdown,
1552 			      dpt, SHUTDOWN_PRI_DEFAULT);
/*
 * dpt_detach() - newbus detach method.  Tears down, per channel, what
 * dpt_attach() built: announces device loss, frees the path, deregisters
 * the bus and frees the SIM.  Then warns the HBA of power loss via
 * dptshutdown() and releases bus resources.
 * NOTE(review): return statement(s) elided in this view.
 */
1557 dpt_detach (device_t dev)
1559 	struct dpt_softc *	dpt;
1562 	dpt = device_get_softc(dev);
1564 	mtx_lock(&dpt->lock);
1565 	for (i = 0; i < dpt->channels; i++) {
		/* Tell CAM peripherals on this bus that their devices are gone. */
1567 			xpt_async(AC_LOST_DEVICE, dpt->paths[i], NULL);
1569 		xpt_free_path(dpt->paths[i]);
1570 		xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
1571 		cam_sim_free(dpt->sims[i], /*free_devq*/TRUE);
1573 	mtx_unlock(&dpt->lock);
	/* Give the HBA its early-power-loss warning before releasing resources. */
1575 	dptshutdown((void *)dpt, SHUTDOWN_PRI_DEFAULT);
1577 	dpt_release_resources(dev);
 * This is the interrupt handler for the DPT driver.
 * Thin wrapper: takes the softc lock and defers all work to
 * dpt_intr_locked().  NOTE(review): the function signature line is
 * elided in this view.
	mtx_lock(&dpt->lock);
	dpt_intr_locked(dpt);
	mtx_unlock(&dpt->lock);
/*
 * dpt_intr_locked() - interrupt service body; caller holds dpt->lock.
 * Loops while the auxiliary status register shows a pending interrupt,
 * validating each completed status packet, double-buffering its fields,
 * clearing the interrupt by reading HA_RSTATUS, syncing/unloading DMA
 * maps and completing the CAM CCB.  Errors are routed to
 * dptprocesserror().  NOTE(review): several lines (continue/break
 * statements, brace closers) are elided in this view.
 */
1599 dpt_intr_locked(dpt_softc_t *dpt)
1607 	u_int32_t	residue_len;	/* Number of bytes not transferred */
1609 	/* First order of business is to check if this interrupt is for us */
1610 	while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {
1613 		 * What we want to do now, is to capture the status, all of it,
1614 		 * move it where it belongs, wake up whoever sleeps waiting to
1615 		 * process this result, and get out of here.
		/* Sanity: the completing busaddr must lie inside our CCB array. */
1617 		if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
1618 		 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
1619 			device_printf(dpt->dev,
1620 			    "Encountered bogus status packet\n");
			/* Reading HA_RSTATUS acknowledges/clears the interrupt. */
1621 			status = dpt_inb(dpt, HA_RSTATUS);
1625 		dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);
		/* Poison the busaddr so a stale repeat is caught by the check above. */
1627 		dpt->sp->ccb_busaddr = ~0;
1629 		/* Ignore status packets with EOC not set */
1630 		if (dpt->sp->EOC == 0) {
1631 			device_printf(dpt->dev,
1632 			    "ERROR: Request %d received with "
1633 			    "clear EOC.\n Marking as LOST.\n",
1634 			    dccb->transaction_id);
1636 #ifdef DPT_HANDLE_TIMEOUTS
1637 			dccb->state |= DPT_CCB_STATE_MARKED_LOST;
1639 			/* This CLEARS the interrupt! */
1640 			status = dpt_inb(dpt, HA_RSTATUS);
1646 		 * Double buffer the status information so the hardware can
1647 		 * work on updating the status packet while we decifer the
1648 		 * one we were just interrupted for.
1649 		 * According to Mark Salyzyn, we only need few pieces of it.
1651 		hba_stat = dpt->sp->hba_stat;
1652 		scsi_stat = dpt->sp->scsi_stat;
1653 		residue_len = dpt->sp->residue_len;
1655 		/* Clear interrupts, check for error */
1656 		if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
1658 			 * Error Condition. Check for magic cookie. Exit
1659 			 * this test on earliest sign of non-reset condition
1662 			/* Check that this is not a board reset interrupt */
1663 			if (dpt_just_reset(dpt)) {
1664 				device_printf(dpt->dev, "HBA rebooted.\n"
1665 					"      All transactions should be "
1668 				device_printf(dpt->dev,
1669 				    ">>---->>  This is incomplete, "
1670 				    "fix me....  <<----<<");
1671 				panic("DPT Rebooted");
		/* Normal completion path: cancel the timeout and finish DMA. */
1677 		callout_stop(&dccb->timer);
1678 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1679 			bus_dmasync_op_t op;
1681 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1682 				op = BUS_DMASYNC_POSTREAD;
1684 				op = BUS_DMASYNC_POSTWRITE;
1685 			bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
1686 			bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
1689 		/* Common Case inline... */
1690 		if (hba_stat == HA_NO_ERROR) {
1691 			ccb->csio.scsi_status = scsi_stat;
1692 			ccb->ccb_h.status = 0;
1693 			switch (scsi_stat) {
1694 			case SCSI_STATUS_OK:
1695 				ccb->ccb_h.status |= CAM_REQ_CMP;
1697 			case SCSI_STATUS_CHECK_COND:
1698 			case SCSI_STATUS_CMD_TERMINATED:
				/* Autosense data was captured in the CCB; hand it to CAM. */
1699 				bcopy(&dccb->sense_data, &ccb->csio.sense_data,
1700 				      ccb->csio.sense_len);
1701 				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1704 				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1705 				/* XXX Freeze DevQ */
1708 			ccb->csio.resid = residue_len;
1709 			dptfreeccb(dpt, dccb);
			/* Non-HA_NO_ERROR completions get full error decoding. */
1712 			dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
/*
 * dptprocesserror() - map an EATA HBA status code (hba_stat) onto the
 * corresponding CAM ccb_h.status value, record the residual byte count,
 * and release the driver CCB.  NOTE(review): several case labels and
 * break statements are elided in this view; the mapping comments below
 * reflect only the visible assignments.
 */
1719 dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
1720 		u_int hba_stat, u_int scsi_stat, u_int32_t resid)
1722 	ccb->csio.resid = resid;
1725 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1728 		ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1730 	case HA_SCSIBUS_RESET:
1731 	case HA_HBA_POWER_UP:	/* Similar effect to a bus reset??? */
1732 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1735 	case HA_CP_RESET:	/* XXX ??? */
1736 	case HA_CP_ABORT_NA:	/* XXX ??? */
1737 	case HA_CP_RESET_NA:	/* XXX ??? */
		/* Only override a status that is still "in progress". */
1738 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
1739 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1744 	case HA_PCI_STABORT:
1748 		ccb->ccb_h.status = CAM_UNCOR_PARITY;
1750 	case HA_UNX_MSGRJCT:
1751 		ccb->ccb_h.status = CAM_MSG_REJECT_REC;
1753 	case HA_UNX_BUSPHASE:
1754 		ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1756 	case HA_UNX_BUS_FREE:
1757 		ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1760 	case HA_RESET_STUCK:
1762 		 * Dead???  Can the controller get unstuck
1763 		 * from these conditions
1765 		ccb->ccb_h.status = CAM_NO_HBA;
1767 	case HA_RSENSE_FAIL:
1768 		ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
	/* Unknown code: report it upstream and fail the request. */
1771 		device_printf(dpt->dev, "Undocumented Error %x\n", hba_stat);
1772 		printf("Please mail this message to shimon@simon-shapiro.org\n");
1773 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1776 	dptfreeccb(dpt, dccb);
/*
 * dpttimeout() - per-CCB callout handler fired when a command takes too
 * long.  First runs the interrupt handler to drain any completions that
 * were lost (missed interrupts leave the controller looking hung); if
 * the CCB is then no longer active it was really completed and we bail.
 * Otherwise the single command is aborted on the HBA via an
 * EATA_SPECIFIC_ABORT immediate command and flagged CAM_CMD_TIMEOUT.
 * NOTE(review): early-return/closing lines are elided in this view.
 */
1781 dpttimeout(void *arg)
1783 	struct dpt_ccb	 *dccb;
1785 	struct dpt_softc *dpt;
1787 	dccb = (struct dpt_ccb *)arg;
1789 	dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
1790 	mtx_assert(&dpt->lock, MA_OWNED);
1791 	xpt_print_path(ccb->ccb_h.path);
1792 	printf("CCB %p - timed out\n", (void *)dccb);
1795 	 * Try to clear any pending jobs.  FreeBSD will lose interrupts,
1796 	 * leaving the controller suspended, and commands timed-out.
1797 	 * By calling the interrupt handler, any command thus stuck will be
1800 	dpt_intr_locked(dpt);
1802 	if ((dccb->state & DCCB_ACTIVE) == 0) {
1803 		xpt_print_path(ccb->ccb_h.path);
1804 		printf("CCB %p - timed out CCB already completed\n",
1809 	/* Abort this particular command.  Leave all others running */
1810 	dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
1811 			   /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
1812 	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
 * Shutdown the controller and ensure that the cache is completely flushed.
 * Called from the shutdown_final event after all disk access has completed.
 * Sends the EATA_POWER_OFF_WARN immediate command, then busy-waits five
 * seconds (DELAY) to give the HBA time to flush its write cache.
1820 dptshutdown(void *arg, int howto)
1824 	dpt = (dpt_softc_t *)arg;
1826 	device_printf(dpt->dev,
1827 	    "Shutting down (mode %x) HBA.	Please wait...\n", howto);
1830 	 * What we do for a shutdown, is give the DPT early power loss warning
1832 	mtx_lock(&dpt->lock);
1833 	dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
1834 	mtx_unlock(&dpt->lock);
	/* 5-second spin; acceptable here since the system is going down anyway. */
1835 	DELAY(1000 * 1000 * 5);
1836 	device_printf(dpt->dev, "Controller was warned of shutdown and is now "
1840 /*============================================================================*/
1843 #ifdef DPT_RESET_HBA
1846 **	Function name : dpt_reset_hba
1848 **	Description : Reset the HBA and properly discard all pending work
/*
 * Builds a local EATA RESET control block, locks out the submitted
 * queue, sends EATA_CMD_RESET (falling back to EATA_COLD_BOOT, and
 * panicking if even that fails), then aborts every pending transaction
 * so the upper SCSI layer re-submits it.
 * NOTE(review): this block still uses the pre-CAM scsi_xfer interface
 * and vtophys(); it is compiled only under DPT_RESET_HBA.  Several
 * lines are elided in this view.
 */
1853 dpt_reset_hba(dpt_softc_t *dpt)
1856 	dpt_ccb_t       dccb, *dccbp;
1858 	struct scsi_xfer *xs;
1860 	mtx_assert(&dpt->lock, MA_OWNED);
1862 	/* Prepare a control block.  The SCSI command part is immaterial */
1865 	dccb.state = DPT_CCB_STATE_NEW;
1866 	dccb.std_callback = NULL;
1867 	dccb.wrbuff_callback = NULL;
1869 	ccb = &dccb.eata_ccb;
1870 	ccb->CP_OpCode = EATA_CMD_RESET;
1871 	ccb->SCSI_Reset = 0;
1873 	ccb->Auto_Req_Sen = 1;
1874 	ccb->cp_id = 0; /* Should be ignored */
	/* DMA addresses are in big-endian (network) byte order per EATA spec. */
1878 	ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
1879 	ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
1880 	ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
1881 	ccb->cp_viraddr = (u_int32_t) & ccb;
1883 	ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1884 	ccb->cp_scsi_cmd = 0;	/* Should be ignored */
1886 	/* Lock up the submitted queue.  We are very persistant here */
1887 	while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
1891 	dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;
1893 	/* Send the RESET message */
1894 	if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1895 					    EATA_CMD_RESET, 0, 0, 0, 0)) != 0) {
1896 		device_printf(dpt->dev, "Failed to send the RESET message.\n"
1897 		       "      Trying cold boot (ouch!)\n");
1900 		if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1901 						    EATA_COLD_BOOT, 0, 0,
			/* Cold boot failed too: nothing left to try. */
1903 			panic("%s:  Faild to cold boot the HBA\n",
1904 			      device_get_nameunit(dpt->dev));
1906 #ifdef DPT_MEASURE_PERFORMANCE
1907 		dpt->performance.cold_boots++;
1908 #endif /* DPT_MEASURE_PERFORMANCE */
1911 #ifdef DPT_MEASURE_PERFORMANCE
1912 	dpt->performance.warm_starts++;
1913 #endif /* DPT_MEASURE_PERFORMANCE */
1915 	device_printf(dpt->dev,
1916 	    "Aborting pending requests.  O/S should re-submit\n");
	/* Drain the completed queue, failing each transaction back upstream. */
1918 	while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
1919 		struct scsi_xfer *xs = dccbp->xs;
1921 		/* Not all transactions have xs structs */
1923 			/* Tell the kernel proper this did not complete well */
1924 			xs->error |= XS_SELTIMEOUT;
1925 			xs->flags |= SCSI_ITSDONE;
1929 		dpt_Qremove_submitted(dpt, dccbp);
1931 		/* Remember, Callbacks are NOT in the standard queue */
1932 		if (dccbp->std_callback != NULL) {
1933 			(dccbp->std_callback)(dpt, dccbp->eata_ccb.cp_channel,
1936 			dpt_Qpush_free(dpt, dccbp);
1940 	device_printf(dpt->dev, "reset done aborting all pending commands\n");
1941 	dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
 * Build a Command Block for target mode READ/WRITE BUFFER,
 * with the ``sync'' bit ON.
 * Although the length and offset are 24 bit fields in the command, they cannot
 * exceed 8192 bytes, so we take them as short integers andcheck their range.
 * If they are sensless, we round them to zero offset, maximum length and
/*
 * Fills in the given dpt_ccb_t as an EATA target-mode READ/WRITE BUFFER
 * command for (bus, target, lun), clamping an out-of-range length/offset
 * pair, and attaches the per-target rw_buffer via scatter/gather.
 * NOTE(review): lines are elided in this view (offset CDB bytes appear
 * to be built from `length` at 1997-1999 — possibly a latent bug, but
 * the elision makes this impossible to confirm here).
 */
1957 dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun,
1958 	       dpt_ccb_t * ccb, int mode, u_int8_t command,
1959 	       u_int16_t length, u_int16_t offset)
1963 	mtx_assert(&dpt->lock, MA_OWNED);
1964 	if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) {
1965 		device_printf(dpt->dev,
1966 		    "Length of %d, and offset of %d are wrong\n",
1968 		length = DPT_MAX_TARGET_MODE_BUFFER_SIZE;
1973 	ccb->state = DPT_CCB_STATE_NEW;
1974 	ccb->std_callback = (ccb_callback) dpt_target_done;
1975 	ccb->wrbuff_callback = NULL;
1977 	cp = &ccb->eata_ccb;
1978 	cp->CP_OpCode = EATA_CMD_DMA_SEND_CP;
1981 	cp->Auto_Req_Sen = 1;
	/* EATA DMA fields are big-endian; vtophys yields the physical address. */
1986 	cp->reqlen = htonl(sizeof(struct scsi_sense_data));
1987 	cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA));
1988 	cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA));
1989 	cp->cp_viraddr = (u_int32_t) & ccb;
1991 	cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1993 	cp->cp_scsi_cmd = command;
1994 	cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK);
1995 	cp->cp_lun = lun;	/* Order is important here! */
1996 	cp->cp_cdb[2] = 0x00;	/* Buffer Id, only 1 :-( */
1997 	cp->cp_cdb[3] = (length >> 16) & 0xFF;	/* Buffer offset MSB */
1998 	cp->cp_cdb[4] = (length >> 8) & 0xFF;
1999 	cp->cp_cdb[5] = length & 0xFF;
2000 	cp->cp_cdb[6] = (length >> 16) & 0xFF;	/* Length MSB */
2001 	cp->cp_cdb[7] = (length >> 8) & 0xFF;
2002 	cp->cp_cdb[8] = length & 0xFF;	/* Length LSB */
2003 	cp->cp_cdb[9] = 0;	/* No sync, no match bits */
2006 	 * This could be optimized to live in dpt_register_buffer.
2007 	 * We keep it here, just in case the kernel decides to reallocate pages
2009 	if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE,
2010 			       dpt->rw_buffer[bus][target][lun])) {
2011 		device_printf(dpt->dev, "Failed to setup Scatter/Gather for "
2012 		    "Target-Mode buffer\n");
2016 /* Setup a target mode READ command */
/*
 * dpt_set_target() - (re)arm a target-mode READ BUFFER command on the
 * given nexus when target mode is enabled, assign it a transaction id,
 * then queue and schedule it.  NOTE(review): the `redo` parameter's
 * distinct handling is on elided lines and cannot be documented here.
 */
2019 dpt_set_target(int redo, dpt_softc_t * dpt,
2020 	       u_int8_t bus, u_int8_t target, u_int8_t lun, int mode,
2021 	       u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb)
2024 	mtx_assert(&dpt->lock, MA_OWNED);
2025 	if (dpt->target_mode_enabled) {
2027 			dpt_target_ccb(dpt, bus, target, lun, ccb, mode,
2028 				       SCSI_TM_READ_BUFFER, length, offset);
2030 		ccb->transaction_id = ++dpt->commands_processed;
2032 #ifdef DPT_MEASURE_PERFORMANCE
2033 		dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
2034 		ccb->command_started = microtime_now;
2036 		dpt_Qadd_waiting(dpt, ccb);
2037 		dpt_sched_queue(dpt);
2039 		device_printf(dpt->dev,
2040 		    "Target Mode Request, but Target Mode is OFF\n");
 * Schedule a buffer to be sent to another target.
 * The work will be scheduled and the callback provided will be called when
 * the work is actually done.
 * Please NOTE:  ``Anyone'' can send a buffer, but only registered clients
 * get notified of receipt of buffers.
/*
 * External entry point: validates the unit/nexus, requires both a
 * registered rw_buffer and receiver for the nexus, grabs a free CCB
 * (replenishing the free list if needed), copies the buffer data, builds
 * a SCSI_TM_WRITE_BUFFER target-mode command and schedules it.
 * Returns a driver status code (INVALID_UNIT, INVALID_SENDER,
 * NOT_REGISTERED, NO_RESOURCES, DRIVER_DOWN, ...).
 */
2054 dpt_send_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
2055 		u_int8_t mode, u_int16_t length, u_int16_t offset, void *data,
2056 		buff_wr_done callback)
2059 	dpt_ccb_t *ccb = NULL;
2061 	/* This is an external call.  Be a bit paranoid */
2062 	dpt = devclass_get_device(dpt_devclass, unit);
2064 		return (INVALID_UNIT);
2066 	mtx_lock(&dpt->lock);
2067 	if (dpt->target_mode_enabled) {
2068 		if ((channel >= dpt->channels) || (target > dpt->max_id) ||
2069 		    (lun > dpt->max_lun)) {
2070 			mtx_unlock(&dpt->lock);
2071 			return (INVALID_SENDER);
2073 		if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
2074 		    (dpt->buffer_receiver[channel][target][lun] == NULL)) {
2075 			mtx_unlock(&dpt->lock);
2076 			return (NOT_REGISTERED);
2079 		/* Process the free list */
2080 		if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2081 			device_printf(dpt->dev,
2082 			    "ERROR: Cannot allocate any more free CCB's.\n"
2083 			    "             Please try later\n");
2084 			mtx_unlock(&dpt->lock);
2085 			return (NO_RESOURCES);
2087 		/* Now grab the newest CCB */
2088 		if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2089 			mtx_unlock(&dpt->lock);
2090 			panic("%s: Got a NULL CCB from pop_free()\n",
2091 			    device_get_nameunit(dpt->dev));
		/* NOTE(review): copies FROM the registered buffer TO `data`;
		 * direction looks inverted for a "send", but surrounding lines
		 * are elided — confirm against complete source. */
2094 		bcopy(dpt->rw_buffer[channel][target][lun] + offset, data, length);
2095 		dpt_target_ccb(dpt, channel, target, lun, ccb, mode,
2096 			       SCSI_TM_WRITE_BUFFER,
2098 		ccb->std_callback = (ccb_callback) callback; /* Potential trouble */
2100 		ccb->transaction_id = ++dpt->commands_processed;
2102 #ifdef DPT_MEASURE_PERFORMANCE
2103 		dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
2104 		ccb->command_started = microtime_now;
2106 		dpt_Qadd_waiting(dpt, ccb);
2107 		dpt_sched_queue(dpt);
2109 		mtx_unlock(&dpt->lock);
	/* Target mode disabled: refuse the request. */
2112 	mtx_unlock(&dpt->lock);
2113 	return (DRIVER_DOWN);
/*
 * dpt_target_done() - completion handler for target-mode READ/WRITE
 * BUFFER CCBs.  Decodes nexus/offset/length out of the stored CDB (via
 * the helper macros below), notifies the registered receiver or write
 * callback, and for reads immediately re-arms the standing READ BUFFER
 * command.  The CCB is owned by the softc and is pushed back on the
 * free queue rather than destroyed.
 */
2117 dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2121 	cp = &ccb->eata_ccb;
2124 	 * Remove the CCB from the waiting queue.
2125 	 *  We do NOT put it back on the free, etc., queues as it is a special
2126 	 * ccb, owned by the dpt_softc of this unit.
2128 	dpt_Qremove_completed(dpt, ccb);
	/* Convenience accessors into the completed CCB's stored CDB/nexus. */
2130 #define br_channel           (ccb->eata_ccb.cp_channel)
2131 #define br_target            (ccb->eata_ccb.cp_id)
2132 #define br_lun               (ccb->eata_ccb.cp_LUN)
2133 #define br_index             [br_channel][br_target][br_lun]
2134 #define read_buffer_callback (dpt->buffer_receiver br_index )
2135 #define read_buffer          (dpt->rw_buffer[br_channel][br_target][br_lun])
2136 #define cb(offset)           (ccb->eata_ccb.cp_cdb[offset])
2137 #define br_offset            ((cb(3) << 16) | (cb(4) << 8) | cb(5))
2138 #define br_length            ((cb(6) << 16) | (cb(7) << 8) | cb(8))
2140 	/* Different reasons for being here, you know... */
2141 	switch (ccb->eata_ccb.cp_scsi_cmd) {
2142 	case SCSI_TM_READ_BUFFER:
2143 		if (read_buffer_callback != NULL) {
2144 			/* This is a buffer generated by a kernel process */
2145 			read_buffer_callback(device_get_unit(dpt->dev),
2146 					     br_channel, br_target, br_lun,
2148 					     br_offset, br_length);
2151 			 * This is a buffer waited for by a user (sleeping)
2157 		/* We ALWAYS re-issue the same command; args are don't-care */
2158 		dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
2161 	case SCSI_TM_WRITE_BUFFER:
2162 		(ccb->wrbuff_callback) (device_get_unit(dpt->dev), br_channel,
2163 					br_target, br_offset, br_length,
2164 					br_lun, ccb->status_packet.hba_stat);
2167 		device_printf(dpt->dev,
2168 		    "%s is an unsupported command for target mode\n",
2169 		    scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
2171 	dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
2172 	dpt_Qpush_free(dpt, ccb);
 * Use this function to register a client for a buffer read target operation.
 * The function you register will be called every time a buffer is received
 * by the target mode code.
/*
 * Handles both REGISTER_BUFFER and the unregister path: on first
 * registration it records the callback, allocates the per-nexus
 * DPT_RW_BUFFER_SIZE buffer and a dedicated CCB, and arms the standing
 * target-mode READ via dpt_set_target(); on unregister it tears those
 * down.  Returns a driver status code.
 * NOTE(review): the bounds check here uses (max - 1) limits, unlike
 * dpt_send_buffer()'s check — possibly inconsistent, but elided lines
 * prevent confirming which is intended.
 */
2182 dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
2183 		    u_int8_t mode, u_int16_t length, u_int16_t offset,
2184 		    dpt_rec_buff callback, dpt_rb_op_t op)
2187 	dpt_ccb_t *ccb = NULL;
2190 	dpt = devclass_get_device(dpt_devclass, unit);
2192 		return (INVALID_UNIT);
2193 	mtx_lock(&dpt->lock);
2195 	if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE) {
2196 		mtx_unlock(&dpt->lock);
2197 		return (DRIVER_DOWN);
2200 	if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) ||
2201 	    (lun > (dpt->max_lun - 1))) {
2202 		mtx_unlock(&dpt->lock);
2203 		return (INVALID_SENDER);
	/* No receiver yet on this nexus: this is a fresh registration. */
2206 	if (dpt->buffer_receiver[channel][target][lun] == NULL) {
2207 		if (op == REGISTER_BUFFER) {
2208 			/* Assign the requested callback */
2209 			dpt->buffer_receiver[channel][target][lun] = callback;
2212 			/* Process the free list */
2213 			if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2214 				device_printf(dpt->dev,
2215 				    "ERROR: Cannot allocate any more free CCB's.\n"
2216 				    "             Please try later\n");
2217 				mtx_unlock(&dpt->lock);
2218 				return (NO_RESOURCES);
2220 			/* Now grab the newest CCB */
2221 			if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2222 				mtx_unlock(&dpt->lock);
2223 				panic("%s: Got a NULL CCB from pop_free()\n",
2224 				    device_get_nameunit(dpt->dev));
2227 			/* Clean up the leftover of the previous tenant */
2228 			ccb->status = DPT_CCB_STATE_NEW;
2229 			dpt->target_ccb[channel][target][lun] = ccb;
2231 			dpt->rw_buffer[channel][target][lun] =
2232 			    malloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_NOWAIT);
2233 			if (dpt->rw_buffer[channel][target][lun] == NULL) {
2234 				device_printf(dpt->dev, "Failed to allocate "
2235 				    "Target-Mode buffer\n");
2236 				dpt_Qpush_free(dpt, ccb);
2237 				mtx_unlock(&dpt->lock);
2238 				return (NO_RESOURCES);
2240 			dpt_set_target(0, dpt, channel, target, lun, mode,
2241 				       length, offset, ccb);
2242 			mtx_unlock(&dpt->lock);
2243 			return (SUCCESSFULLY_REGISTERED);
2245 			mtx_unlock(&dpt->lock);
2246 			return (NOT_REGISTERED);
	/* Nexus already has a receiver. */
2249 		if (op == REGISTER_BUFFER) {
2250 			if (dpt->buffer_receiver[channel][target][lun] == callback) {
2251 				mtx_unlock(&dpt->lock);
2252 				return (ALREADY_REGISTERED);
2254 				mtx_unlock(&dpt->lock);
2255 				return (REGISTERED_TO_ANOTHER);
	/* Unregister path: only the owner of the callback may remove it. */
2258 			if (dpt->buffer_receiver[channel][target][lun] == callback) {
2259 				dpt->buffer_receiver[channel][target][lun] = NULL;
2260 				dpt_Qpush_free(dpt, ccb);
2261 				free(dpt->rw_buffer[channel][target][lun], M_DEVBUF);
2262 				mtx_unlock(&dpt->lock);
2263 				return (SUCCESSFULLY_REGISTERED);
2265 				mtx_unlock(&dpt->lock);
2266 				return (INVALID_CALLBACK);
2271 	mtx_unlock(&dpt->lock);
2274 /* Return the state of the blinking DPT LED's */
/*
 * Samples dpt_inl(dpt, 1) up to 10 times until two consecutive reads
 * agree; if the stable value equals DPT_BLINK_INDICATOR, reads and
 * returns the LED pattern byte from register 5.
 * NOTE(review): loop-body/return lines are elided in this view.
 */
2276 dpt_blinking_led(dpt_softc_t * dpt)
2283 	mtx_assert(&dpt->lock, MA_OWNED);
2286 	for (ndx = 0, state = 0, previous = 0;
2287 	     (ndx < 10) && (state != previous);
2290 		state = dpt_inl(dpt, 1);
2293 	if ((state == previous) && (state == DPT_BLINK_INDICATOR))
2294 		result = dpt_inb(dpt, 5);
 * Execute a command which did not come from the kernel's SCSI layer.
 * The only way to map user commands to bus and target is to comply with the
 * standard DPT wire-down scheme:
/*
 * ioctl-style passthrough: decodes the nexus from the minor number,
 * validates it, copies the user's EATA packet into a driver CCB,
 * rewrites the DMA/length fields, allocates and (for DataIn) fills a
 * bounce buffer, handles the special BUS_QUIET / BUS_UNQUIET
 * multifunction commands, then queues the CCB and sleeps until
 * dpt_user_cmd_done() wakes it.
 * NOTE(review): many lines are elided in this view; error paths and
 * some copyin/copyout details are not visible.
 */
2305 dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
2306 	     caddr_t cmdarg, int minor_no)
2310 	int channel, target, lun;
2315 	mtx_assert(&dpt->lock, MA_OWNED);
2317 	channel = minor2hba(minor_no);
2318 	target = minor2target(minor_no);
2319 	lun = minor2lun(minor_no);
2321 	if ((channel > (dpt->channels - 1))
2322 	 || (target > dpt->max_id)
2323 	 || (lun > dpt->max_lun))
	/* Commands addressed to the HBA itself must carry the EATA signature. */
2326 	if (target == dpt->sc_scsi_link[channel].adapter_targ) {
2327 		/* This one is for the controller itself */
2328 		if ((user_cmd->eataID[0] != 'E')
2329 		 || (user_cmd->eataID[1] != 'A')
2330 		 || (user_cmd->eataID[2] != 'T')
2331 		 || (user_cmd->eataID[3] != 'A')) {
2335 	/* Get a DPT CCB, so we can prepare a command */
2337 	/* Process the free list */
2338 	if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2339 		device_printf(dpt->dev,
2340 		    "ERROR: Cannot allocate any more free CCB's.\n"
2341 		    "             Please try later\n");
2344 	/* Now grab the newest CCB */
2345 	if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2346 		panic("%s: Got a NULL CCB from pop_free()\n",
2347 		    device_get_nameunit(dpt->dev));
2349 	/* Clean up the leftover of the previous tenant */
2350 	ccb->status = DPT_CCB_STATE_NEW;
2353 	bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
2354 	      sizeof(eata_ccb_t));
2356 	/* We do not want to do user specified scatter/gather.  Why?? */
2357 	if (ccb->eata_ccb.scatter == 1)
	/* Force autosense and rewrite user-supplied DMA fields (big-endian). */
2360 	ccb->eata_ccb.Auto_Req_Sen = 1;
2361 	ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
2362 	ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
2363 	ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
2364 	ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
2365 	ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
2366 	ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;
2368 	if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
2369 		/* Data I/O is involved in this command.  Alocate buffer */
2370 		if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
2371 			data = contigmalloc(ccb->eata_ccb.cp_datalen,
2372 					    M_TEMP, M_WAITOK, 0, ~0,
2373 					    ccb->eata_ccb.cp_datalen,
2376 			data = malloc(ccb->eata_ccb.cp_datalen, M_TEMP,
2381 			device_printf(dpt->dev, "Cannot allocate %d bytes "
2382 			    "for EATA command\n",
2383 			    ccb->eata_ccb.cp_datalen);
2386 #define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
2387 		if (ccb->eata_ccb.DataIn == 1) {
			/* NOTE(review): copyin returns 0/EFAULT, not -1 — this
			 * check looks wrong, but elided context prevents a fix here. */
2388 			if (copyin(usr_cmd_DMA,
2389 				   data, ccb->eata_ccb.cp_datalen) == -1)
2393 		/* No data I/O involved here.  Make sure the DPT knows that */
2394 		ccb->eata_ccb.cp_datalen = 0;
2398 	if (ccb->eata_ccb.FWNEST == 1)
2399 		ccb->eata_ccb.FWNEST = 0;
2401 	if (ccb->eata_ccb.cp_datalen != 0) {
2402 		if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
2410 	 * We are required to quiet a SCSI bus.
2411 	 * since we do not queue comands on a bus basis,
2412 	 * we wait for ALL commands on a controller to complete.
2413 	 * In the mean time, sched_queue() will not schedule new commands.
2415 	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2416 	    && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
2417 		/* We wait for ALL traffic for this HBa to subside */
2418 		dpt->state |= DPT_HA_QUIET;
2420 		while ((submitted = dpt->submitted_ccbs_count) != 0) {
2421 			huh = mtx_sleep((void *) dpt, &dpt->lock,
2422 			    PCATCH | PRIBIO, "dptqt", 100 * hz);
2425 				/* Wakeup call received */
2436 	/* Resume normal operation */
2437 	if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2438 	    && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
2439 		dpt->state &= ~DPT_HA_QUIET;
2442 	 * Schedule the command and submit it.
2443 	 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
2447 	ccb->eata_ccb.Auto_Req_Sen = 1;	/* We always want this feature */
2449 	ccb->transaction_id = ++dpt->commands_processed;
2450 	ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
2451 	ccb->result = (u_int32_t) & cmdarg;
2454 #ifdef DPT_MEASURE_PERFORMANCE
2455 	++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd];
2456 	ccb->command_started = microtime_now;
2458 	dpt_Qadd_waiting(dpt, ccb);
2460 	dpt_sched_queue(dpt);
2462 	/* Wait for the command to complete */
2463 	(void) mtx_sleep((void *) ccb, &dpt->lock, PCATCH | PRIBIO, "dptucw",
2466 	/* Free allocated memory */
/*
 * dpt_user_cmd_done() - completion callback for dpt_user_cmd().
 * Drops the softc lock to copyout autosense data, DataIn payload, and
 * the HBA status back to user space, reacquiring the lock around CCB
 * bookkeeping.  On copyout failure the CCB result is set to EFAULT and
 * the CCB is freed early.  NOTE(review): wakeup/return lines and the
 * final free are elided in this view.
 */
2474 dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
	/* copyout() may sleep/fault; cannot hold the mutex across it. */
2479 	mtx_unlock(&dpt->lock);
2482 	 * If Auto Request Sense is on, copyout the sense struct
2484 #define usr_pckt_DMA 	(caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA)
2485 #define usr_pckt_len	ntohl(ccb->eata_ccb.cp_datalen)
2486 	if (ccb->eata_ccb.Auto_Req_Sen == 1) {
2487 		if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA,
2488 			    sizeof(struct scsi_sense_data))) {
2489 			mtx_lock(&dpt->lock);
2490 			ccb->result = EFAULT;
2491 			dpt_Qpush_free(dpt, ccb);
2496 	/* If DataIn is on, copyout the data */
2497 	if ((ccb->eata_ccb.DataIn == 1)
2498 	    && (ccb->status_packet.hba_stat == HA_NO_ERROR)) {
2499 		if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) {
2500 			mtx_lock(&dpt->lock);
2501 			dpt_Qpush_free(dpt, ccb);
2502 			ccb->result = EFAULT;
2508 	/* Copyout the status */
2509 	result = ccb->status_packet.hba_stat;
2510 	cmd_arg = (caddr_t) ccb->result;
2512 	if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) {
2513 		mtx_lock(&dpt->lock);
2514 		dpt_Qpush_free(dpt, ccb);
2515 		ccb->result = EFAULT;
2519 	mtx_lock(&dpt->lock);
2520 	/* Put the CCB back in the freelist */
2521 	ccb->state |= DPT_CCB_STATE_COMPLETED;
2522 	dpt_Qpush_free(dpt, ccb);
2524 	/* Free allocated memory */
2528 #ifdef DPT_HANDLE_TIMEOUTS
2530  * This function walks down the SUBMITTED queue.
2531  * Every request that is too old gets aborted and marked.
2532  * Since the DPT will complete (interrupt) immediately (what does that mean?),
2533  * We just walk the list, aborting old commands and marking them as such.
2534  * The dpt_complete function will get rid of the that were interrupted in the
2537  * This function needs to run at splcam(), as it interacts with the submitted
2538  * queue, as well as the completed and free queues.  Just like dpt_intr() does.
2539  * To run it at any ISPL other than that of dpt_intr(), will mean that dpt_intr
2540  * willbe able to pre-empt it, grab a transaction in progress (towards
2541  * destruction) and operate on it.  The state of this transaction will be not
2543  * The only other option, is to lock it only as long as necessary but have
2544  * dpt_intr() spin-wait on it.  In a UP environment this makes no sense and in
2545  * a SMP environment, the advantage is dubvious for a function that runs once
2546  * every ten seconds for few microseconds and, on systems with healthy
2547  * hardware, does not do anything anyway.
/*
 * NOTE(review): compiled only under DPT_HANDLE_TIMEOUTS; this block
 * still uses the pre-CAM scsi_xfer interface and contains visibly stale
 * constructs (e.g. the TAILQ_FIRST "loop" at 2562 is not valid TAILQ
 * usage); several lines are elided in this view, so it is documented
 * rather than repaired here.
 */
2551 dpt_handle_timeouts(dpt_softc_t * dpt)
	/* Re-entry guard: only one timeout sweep at a time. */
2555 	if (dpt->state & DPT_HA_TIMEOUTS_ACTIVE) {
2556 		device_printf(dpt->dev, "WARNING: Timeout Handling Collision\n");
2559 	dpt->state |= DPT_HA_TIMEOUTS_ACTIVE;
2561 	/* Loop through the entire submitted queue, looking for lost souls */
2562 	TAILQ_FIRST(ccb, &&dpt->submitted_ccbs, links) {
2563 		struct scsi_xfer *xs;
2564 		u_int32_t       age, max_age;
2567 		age = dpt_time_delta(ccb->command_started, microtime_now);
2569 #define TenSec	10000000
2571 		if (xs == NULL) {	/* Local, non-kernel call */
			/* Scale the caller's timeout by current queue depth. */
2574 			max_age = (((xs->timeout * (dpt->submitted_ccbs_count
2575 						    + DPT_TIMEOUT_FACTOR))
2577 				   ? (xs->timeout * (dpt->submitted_ccbs_count
2578 						     + DPT_TIMEOUT_FACTOR))
2583 		 * If a transaction is marked lost and is TWICE as old as we
2584 		 * care, then, and only then do we destroy it!
2586 		if (ccb->state & DPT_CCB_STATE_MARKED_LOST) {
2587 			/* Remember who is next */
2588 			if (age > (max_age * 2)) {
2589 				dpt_Qremove_submitted(dpt, ccb);
2590 				ccb->state &= ~DPT_CCB_STATE_MARKED_LOST;
2591 				ccb->state |= DPT_CCB_STATE_ABORTED;
2592 #define cmd_name scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd)
2593 				if (ccb->retries++ > DPT_RETRIES) {
2594 					device_printf(dpt->dev,
2595 					    "ERROR: Destroying stale "
2598 					    "on c%db%dt%du%d (%d/%d)\n",
2599 					       ccb->transaction_id,
2601 					       device_get_unit(dpt->dev),
2602 					       ccb->eata_ccb.cp_channel,
2603 					       ccb->eata_ccb.cp_id,
2604 					       ccb->eata_ccb.cp_LUN, age,
2606 #define send_ccb &ccb->eata_ccb
2607 #define ESA	 EATA_SPECIFIC_ABORT
					/* Out of retries: abort it on the HBA and fail it upstream. */
2608 					(void) dpt_send_immediate(dpt,
2612 					dpt_Qpush_free(dpt, ccb);
2614 					/* The SCSI layer should re-try */
2615 					xs->error |= XS_TIMEOUT;
2616 					xs->flags |= SCSI_ITSDONE;
2619 					device_printf(dpt->dev,
2620 					    "ERROR: Stale %d (%s) on "
2621 					    "c%db%dt%du%d (%d)\n"
2624 					       ccb->transaction_id,
2626 					       device_get_unit(dpt->dev),
2627 					       ccb->eata_ccb.cp_channel,
2628 					       ccb->eata_ccb.cp_id,
2629 					       ccb->eata_ccb.cp_LUN,
2630 					       age, ccb->retries, DPT_RETRIES);
					/* Retries remain: requeue for another attempt. */
2632 					dpt_Qpush_waiting(dpt, ccb);
2633 					dpt_sched_queue(dpt);
2638 		 * This is a transaction that is not to be destroyed
2639 		 * (yet) But it is too old for our liking.  We wait as
2640 		 * long as the upper layer thinks.  Not really, we
2641 		 * multiply that by the number of commands in the
2642 		 * submitted queue + 1.
2644 		if (!(ccb->state & DPT_CCB_STATE_MARKED_LOST) &&
2645 		    (age != ~0) && (age > max_age)) {
2646 			device_printf(dpt->dev,
2647 			    "ERROR: Marking %d (%s) on "
2649 			       " as late after %dusec\n",
2650 			       ccb->transaction_id,
2652 			       device_get_unit(dpt->dev),
2653 			       ccb->eata_ccb.cp_channel,
2654 			       ccb->eata_ccb.cp_id,
2655 			       ccb->eata_ccb.cp_LUN, age);
2656 			ccb->state |= DPT_CCB_STATE_MARKED_LOST;
2661 	dpt->state &= ~DPT_HA_TIMEOUTS_ACTIVE;
/*
 * dpt_timeout() - periodic callout (every 10 s): runs a timeout sweep
 * unless one is already active, then re-arms itself.  Compiled only
 * under DPT_HANDLE_TIMEOUTS.
 */
2665 dpt_timeout(void *arg)
2667 	dpt_softc_t    *dpt = (dpt_softc_t *) arg;
2669 	mtx_assert(&dpt->lock, MA_OWNED);
2670 	if (!(dpt->state & DPT_HA_TIMEOUTS_ACTIVE))
2671 		dpt_handle_timeouts(dpt);
2673 	callout_reset(&dpt->timer, hz * 10, dpt_timeout, dpt);
2676 #endif /* DPT_HANDLE_TIMEOUTS */