2 * Copyright (c) 1997 by Simon Shapiro
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * dpt_scsi.c: SCSI dependent code for the DPT driver
36 * credits: Assisted by Mike Neuffer in the early low level DPT code
37 * Thanx to Mark Salyzyn of DPT for his assistance.
38 * Special thanx to Justin Gibbs for invaluable help in
39 * making this driver look and work like a FreeBSD component.
40 * Last but not least, many thanx to UCB and the FreeBSD
41 * team for creating and maintaining such a wonderful O/S.
43 * TODO: * Add ISA probe code.
44 * * Add driver-level RAID-0. This will allow interoperability with
45 * NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID
46 * arrays that span controllers (Wow!).
54 #include <sys/param.h>
55 #include <sys/systm.h>
57 #include <sys/eventhandler.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
63 #include <machine/bus.h>
65 #include <machine/resource.h>
70 #include <cam/cam_ccb.h>
71 #include <cam/cam_sim.h>
72 #include <cam/cam_xpt_sim.h>
73 #include <cam/cam_debug.h>
74 #include <cam/scsi/scsi_all.h>
75 #include <cam/scsi/scsi_message.h>
80 #include <dev/dpt/dpt.h>
82 /* dpt_isa.c, dpt_eisa.c, and dpt_pci.c need this in a central place */
83 devclass_t dpt_devclass;
85 #define microtime_now dpt_time_now()
87 #define dpt_inl(dpt, port) \
88 bus_read_4((dpt)->io_res, (dpt)->io_offset + port)
89 #define dpt_inb(dpt, port) \
90 bus_read_1((dpt)->io_res, (dpt)->io_offset + port)
91 #define dpt_outl(dpt, port, value) \
92 bus_write_4((dpt)->io_res, (dpt)->io_offset + port, value)
93 #define dpt_outb(dpt, port, value) \
94 bus_write_1((dpt)->io_res, (dpt)->io_offset + port, value)
97 * These will have to be setup by parameters passed at boot/load time. For
98 * performance reasons, we make them constants for the time being.
100 #define dpt_min_segs DPT_MAX_SEGS
101 #define dpt_max_segs DPT_MAX_SEGS
103 /* Definitions for our use of the SIM private CCB area */
104 #define ccb_dccb_ptr spriv_ptr0
105 #define ccb_dpt_ptr spriv_ptr1
107 /* ================= Private Inline Function declarations ===================*/
108 static __inline int dpt_just_reset(dpt_softc_t * dpt);
109 static __inline int dpt_raid_busy(dpt_softc_t * dpt);
111 static __inline int dpt_pio_wait (u_int32_t, u_int, u_int, u_int);
113 static __inline int dpt_wait(dpt_softc_t *dpt, u_int bits,
115 static __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt);
116 static __inline void dptfreeccb(struct dpt_softc *dpt,
117 struct dpt_ccb *dccb);
118 static __inline bus_addr_t dptccbvtop(struct dpt_softc *dpt,
119 struct dpt_ccb *dccb);
121 static __inline int dpt_send_immediate(dpt_softc_t *dpt,
122 eata_ccb_t *cmd_block,
123 u_int32_t cmd_busaddr,
125 u_int ifc, u_int code,
128 /* ==================== Private Function declarations =======================*/
129 static void dptmapmem(void *arg, bus_dma_segment_t *segs,
130 int nseg, int error);
132 static struct sg_map_node*
133 dptallocsgmap(struct dpt_softc *dpt);
135 static int dptallocccbs(dpt_softc_t *dpt);
137 static int dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb,
138 u_int32_t dccb_busaddr, u_int size,
139 u_int page, u_int target, int extent);
140 static void dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb,
141 u_int32_t dccb_busaddr,
144 static void dpt_poll(struct cam_sim *sim);
145 static void dpt_intr_locked(dpt_softc_t *dpt);
147 static void dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
148 int nseg, int error);
150 static void dpt_action(struct cam_sim *sim, union ccb *ccb);
152 static int dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd,
153 u_int32_t cmd_busaddr,
154 u_int command, u_int retries,
155 u_int ifc, u_int code,
157 static void dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb,
158 union ccb *ccb, u_int hba_stat,
159 u_int scsi_stat, u_int32_t resid);
161 static void dpttimeout(void *arg);
162 static void dptshutdown(void *arg, int howto);
164 /* ================= Private Inline Function definitions ====================*/
/*
 * dpt_just_reset: read status ports 2-5 and compare against the
 * 'D','P','T','H' signature, i.e. test whether the adapter has just
 * been reset.
 * NOTE(review): this extract is missing lines (declaration, braces,
 * return statements) — the embedded numbering is discontinuous.
 */
166 dpt_just_reset(dpt_softc_t * dpt)
168 if ((dpt_inb(dpt, 2) == 'D')
169 && (dpt_inb(dpt, 3) == 'P')
170 && (dpt_inb(dpt, 4) == 'T')
171 && (dpt_inb(dpt, 5) == 'H'))
/*
 * dpt_raid_busy: read ports 0-2 for the 'D','P','T' signature that the
 * firmware presents while the RAID is still busy (used by dpt_get_conf
 * to detect drives spinning up).
 * NOTE(review): declaration/braces/returns missing from this extract.
 */
178 dpt_raid_busy(dpt_softc_t * dpt)
180 if ((dpt_inb(dpt, 0) == 'D')
181 && (dpt_inb(dpt, 1) == 'P')
182 && (dpt_inb(dpt, 2) == 'T'))
/*
 * dpt_pio_wait: poll register (base + reg) via raw PIO, masking with
 * `bits`, for up to 20000 iterations (used before the softc/bus handle
 * exists, e.g. from dpt_pio_get_conf()).
 * NOTE(review): loop body/exit and return lines missing from extract.
 */
190 dpt_pio_wait (u_int32_t base, u_int reg, u_int bits, u_int state)
195 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */
196 c = inb(base + reg) & bits;
/*
 * dpt_wait: like dpt_pio_wait(), but reads HA_RSTATUS through the
 * softc's resource handle (dpt_inb) instead of a raw I/O base.
 * NOTE(review): loop body/exit and return lines missing from extract.
 */
207 dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state)
212 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */
213 c = dpt_inb(dpt, HA_RSTATUS) & bits;
/*
 * dptgetccb: pop a DCCB from the free list; if empty and still under
 * max_dccbs, grow the pool and retry the list head.
 * Caller must hold dpt->lock (asserted).
 * NOTE(review): the pool-growing call between the else-if and the
 * re-fetch, plus the return paths, are missing from this extract.
 */
222 static __inline struct dpt_ccb*
223 dptgetccb(struct dpt_softc *dpt)
225 struct dpt_ccb* dccb;
228 mtx_assert(&dpt->lock, MA_OWNED);
229 if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) {
230 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
232 } else if (dpt->total_dccbs < dpt->max_dccbs) {
234 dccb = SLIST_FIRST(&dpt->free_dccb_list);
236 device_printf(dpt->dev, "Can't malloc DCCB\n");
238 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
/*
 * dptfreeccb: return a DCCB to the free list.  An ACTIVE ccb is first
 * unlinked from the pending list; CAM_RELEASE_SIMQ is propagated when
 * either the dccb requested it or a prior resource shortage needs the
 * SIM queue released.  Caller must hold dpt->lock (asserted).
 */
247 dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb)
251 mtx_assert(&dpt->lock, MA_OWNED);
252 if ((dccb->state & DCCB_ACTIVE) != 0)
253 LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le);
254 if ((dccb->state & DCCB_RELEASE_SIMQ) != 0)
255 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
256 else if (dpt->resource_shortage != 0
257 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
258 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
259 dpt->resource_shortage = FALSE;
261 dccb->state = DCCB_FREE;
262 SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links);
/* dptccbvtop: virtual DCCB pointer -> bus address (offset from busbase). */
266 static __inline bus_addr_t
267 dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb)
269 return (dpt->dpt_ccb_busbase
270 + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs));
/* dptccbptov: inverse of dptccbvtop() — bus address -> DCCB pointer. */
273 static __inline struct dpt_ccb *
274 dptccbptov(struct dpt_softc *dpt, bus_addr_t busaddr)
276 return (dpt->dpt_dccbs
277 + ((struct dpt_ccb *)busaddr
278 - (struct dpt_ccb *)dpt->dpt_ccb_busbase))
282 * Send a command for immediate execution by the DPT
283 * See above function for IMPORTANT notes.
/*
 * Thin wrapper: forwards to dpt_send_eata_command() with the
 * EATA_CMD_IMMEDIATE opcode.
 * NOTE(review): the tail of the call (code, code2 arguments and the
 * closing parenthesis) is missing from this extract.
 */
286 dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
287 u_int32_t cmd_busaddr, u_int retries,
288 u_int ifc, u_int code, u_int code2)
290 return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr,
291 EATA_CMD_IMMEDIATE, retries, ifc,
296 /* ===================== Private Function definitions =======================*/
/*
 * dptmapmem: bus_dmamap_load() callback — store the first segment's
 * bus address into the bus_addr_t that `arg` points at.
 */
298 dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
300 bus_addr_t *busaddrp;
302 busaddrp = (bus_addr_t *)arg;
303 *busaddrp = segs->ds_addr;
/*
 * dptallocsgmap: allocate one PAGE_SIZE chunk of S/G space, DMA-map it
 * (bus address recorded via dptmapmem into sg_physaddr), and link the
 * node onto dpt->sg_maps.  Frees the node if bus_dmamem_alloc fails.
 * NOTE(review): the malloc NULL-check and the return statements are
 * missing from this extract.
 */
306 static struct sg_map_node *
307 dptallocsgmap(struct dpt_softc *dpt)
309 struct sg_map_node *sg_map;
311 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
316 /* Allocate S/G space for the next batch of CCBS */
317 if (bus_dmamem_alloc(dpt->sg_dmat, (void **)&sg_map->sg_vaddr,
318 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
319 free(sg_map, M_DEVBUF);
323 (void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
324 PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr,
327 SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links);
333 * Allocate another chunk of CCB's. Return count of entries added.
/*
 * Fills in each new DCCB: per-CCB callout, S/G list virtual/bus
 * addresses (byte-swapped to controller order via htonl), the shared
 * status packet DMA address, the sense-data request DMA address, and a
 * tag equal to its index.  Caller must hold dpt->lock (asserted).
 * NOTE(review): several lines (declarations, the dmamap error check,
 * loop close, return) are missing from this extract.
 */
336 dptallocccbs(dpt_softc_t *dpt)
338 struct dpt_ccb *next_ccb;
339 struct sg_map_node *sg_map;
346 mtx_assert(&dpt->lock, MA_OWNED);
347 next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs];
349 if (next_ccb == dpt->dpt_dccbs) {
351 * First time through. Re-use the S/G
352 * space we allocated for initialization
355 sg_map = SLIST_FIRST(&dpt->sg_maps);
357 sg_map = dptallocsgmap(dpt);
363 segs = sg_map->sg_vaddr;
364 physaddr = sg_map->sg_physaddr;
/* One page of S/G space is carved into per-CCB lists of sgsize entries. */
366 newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t)));
367 for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) {
370 error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0,
374 callout_init_mtx(&next_ccb->timer, &dpt->lock, 0);
375 next_ccb->sg_list = segs;
376 next_ccb->sg_busaddr = htonl(physaddr);
377 next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr);
378 next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
379 next_ccb->eata_ccb.cp_reqDMA =
380 htonl(dptccbvtop(dpt, next_ccb)
381 + offsetof(struct dpt_ccb, sense_data));
382 next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend;
383 next_ccb->state = DCCB_FREE;
384 next_ccb->tag = dpt->total_dccbs;
385 SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links);
387 physaddr += (dpt->sgsize * sizeof(dpt_sg_t));
388 dpt->dpt_ccb_busend += sizeof(*next_ccb);
/*
 * dpt_pio_get_conf: fetch the EATA configuration block from a board at
 * `base` using raw PIO only (no softc yet): reset the controller, wait
 * for ready, issue EATA_CMD_PIO_READ_CONFIG, then read the dpt_conf_t
 * one 16-bit word at a time from HA_RDATA.  The signature check at the
 * end validates the card; leftover DRQ data is drained.
 * NOTE(review): error-path returns and the closing of several blocks
 * are missing from this extract.
 */
397 dpt_pio_get_conf (u_int32_t base)
399 static dpt_conf_t * conf;
404 * Allocate a dpt_conf_t
407 conf = (dpt_conf_t *)malloc(sizeof(dpt_conf_t),
408 M_DEVBUF, M_NOWAIT | M_ZERO);
412 * If we didn't get one then we probably won't ever get one.
415 printf("dpt: unable to allocate dpt_conf_t\n");
420 * Reset the controller.
422 outb((base + HA_WCOMMAND), EATA_CMD_RESET);
425 * Wait for the controller to become ready.
426 * For some reason there can be -no- delays after calling reset
427 * before we wait on ready status.
429 if (dpt_pio_wait(base, HA_RSTATUS, HA_SBUSY, 0)) {
430 printf("dpt: timeout waiting for controller to become ready\n");
434 if (dpt_pio_wait(base, HA_RAUXSTAT, HA_ABUSY, 0)) {
435 printf("dpt: timetout waiting for adapter ready.\n");
440 * Send the PIO_READ_CONFIG command.
442 outb((base + HA_WCOMMAND), EATA_CMD_PIO_READ_CONFIG);
445 * Read the data into the struct.
447 p = (u_int16_t *)conf;
448 for (i = 0; i < (sizeof(dpt_conf_t) / 2); i++) {
450 if (dpt_pio_wait(base, HA_RSTATUS, HA_SDRQ, 0)) {
452 printf("dpt: timeout in data read.\n");
456 (*p) = inw(base + HA_RDATA);
460 if (inb(base + HA_RSTATUS) & HA_SERROR) {
462 printf("dpt: error reading configuration data.\n");
/* "EATA" in big- and little-endian byte order, respectively. */
466 #define BE_EATA_SIGNATURE 0x45415441
467 #define LE_EATA_SIGNATURE 0x41544145
470 * Test to see if we have a valid card.
472 if ((conf->signature == BE_EATA_SIGNATURE) ||
473 (conf->signature == LE_EATA_SIGNATURE)) {
475 while (inb(base + HA_RSTATUS) & HA_SDRQ) {
476 inw(base + HA_RDATA);
486 * Read a configuration page into the supplied dpt_cont_t buffer.
/*
 * Builds an INQUIRY-based EATA CP (auto request sense enabled, datalen
 * = size), waits for the controller to become ready (spinning through
 * dpt_raid_busy if RAID drives are still spinning up), sends it with
 * EATA_CMD_DMA_SEND_CP, then polls up to ~2 s for the interrupt and
 * validates HBA/SCSI status and residue in the status packet.
 * Caller must hold dpt->lock (asserted).
 * NOTE(review): multiple lines (declarations, returns, the poll loop
 * head and block closes) are missing from this extract.
 */
489 dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
490 u_int size, u_int page, u_int target, int extent)
499 mtx_assert(&dpt->lock, MA_OWNED);
500 cp = &dccb->eata_ccb;
501 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp));
505 cp->Auto_Req_Sen = 1;
506 cp->reqlen = sizeof(struct scsi_sense_data);
509 cp->cp_LUN = 0; /* In the EATA packet */
510 cp->cp_lun = 0; /* In the SCSI command */
512 cp->cp_scsi_cmd = INQUIRY;
515 cp->cp_extent = extent;
518 cp->cp_channel = 0; /* DNC, Interpret mode is set */
520 cp->cp_datalen = htonl(size);
523 * This could be a simple for loop, but we suspected the compiler To
524 * have optimized it a bit too much. Wait for the controller to
527 while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC)
528 && (status != (HA_SREADY | HA_SSC | HA_SERROR))
529 && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ)))
530 || (dpt_wait(dpt, HA_SBUSY, 0))) {
533 * RAID Drives still Spinning up? (This should only occur if
534 * the DPT controller is in a NON PC (PCI?) platform).
536 if (dpt_raid_busy(dpt)) {
537 device_printf(dpt->dev,
538 "WARNING: Get_conf() RSUS failed.\n");
543 DptStat_Reset_BUSY(dpt->sp);
546 * XXXX We might want to do something more clever than aborting at
547 * this point, like resetting (rebooting) the controller and trying
550 if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
551 EATA_CMD_DMA_SEND_CP,
552 10000, 0, 0, 0)) != 0) {
553 device_printf(dpt->dev,
554 "WARNING: Get_conf() failed (%d) to send "
555 "EATA_CMD_DMA_READ_CONFIG\n",
559 /* Wait for two seconds for a response. This can be slow */
562 && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
567 /* Grab the status and clear interrupts */
568 status = dpt_inb(dpt, HA_RSTATUS);
571 * Check the status carefully. Return only if the
572 * command was successful.
574 if (((status & HA_SERROR) == 0)
575 && (dpt->sp->hba_stat == 0)
576 && (dpt->sp->scsi_stat == 0)
577 && (dpt->sp->residue_len == 0))
580 if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND)
586 /* Detect Cache parameters and size */
/*
 * Issues a SCSI LOG SENSE (0x4d) for DPT's hardware-configuration log
 * page (0x33) and walks the returned parameters: code 1 is the layout
 * sanity check, code 6 carries the cache mode bits (bit 0x2 / 0x4 of
 * byte 4) and the cache size.  Defaults to write-back if anything
 * fails.  Caller must hold dpt->lock (asserted).
 * NOTE(review): declarations, the poll-loop head, several returns and
 * block closes are missing from this extract; the sizeof(dpt->sp) in
 * the bzero below measures the pointer, not *dpt->sp — worth checking
 * against the canonical source.
 */
588 dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
598 mtx_assert(&dpt->lock, MA_OWNED);
601 * Default setting, for best performance..
602 * This is what virtually all cards default to..
604 dpt->cache_type = DPT_CACHE_WRITEBACK;
607 cp = &dccb->eata_ccb;
608 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(dpt->sp));
611 /* Setup the command structure */
614 cp->Auto_Req_Sen = 1;
615 cp->reqlen = sizeof(struct scsi_sense_data);
617 cp->cp_id = 0; /* who cares? The HBA will interpret.. */
618 cp->cp_LUN = 0; /* In the EATA packet */
619 cp->cp_lun = 0; /* In the SCSI command */
622 cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP;
631 * Build the EATA Command Packet structure
632 * for a Log Sense Command.
634 cp->cp_cdb[0] = 0x4d;
636 cp->cp_cdb[2] = 0x40 | 0x33;
639 cp->cp_datalen = htonl(512);
641 result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
642 EATA_CMD_DMA_SEND_CP,
645 device_printf(dpt->dev,
646 "WARNING: detect_cache() failed (%d) to send "
647 "EATA_CMD_DMA_SEND_CP\n", result);
650 /* Wait for two seconds for a response. This can be slow... */
653 !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
658 /* Grab the status and clear interrupts */
659 status = dpt_inb(dpt, HA_RSTATUS);
664 if (buff[0] != 0x33) {
667 bytes = DPT_HCP_LENGTH(buff);
668 param = DPT_HCP_FIRST(buff);
670 if (DPT_HCP_CODE(param) != 1) {
672 * DPT Log Page layout error
674 device_printf(dpt->dev, "NOTICE: Log Page (1) layout error\n");
677 if (!(param[4] & 0x4)) {
678 dpt->cache_type = DPT_NO_CACHE;
681 while (DPT_HCP_CODE(param) != 6) {
682 param = DPT_HCP_NEXT(param);
684 || (param >= &buff[bytes])) {
689 if (param[4] & 0x2) {
693 dpt->cache_type = DPT_NO_CACHE;
697 if (param[4] & 0x4) {
698 dpt->cache_type = DPT_CACHE_WRITETHROUGH;
701 /* XXX This isn't correct. This log parameter only has two bytes.... */
703 dpt->cache_size = param[5]
/* dpt_poll: CAM SIM poll hook — invoke the interrupt handler directly. */
711 dpt_poll(struct cam_sim *sim)
713 dpt_intr_locked(cam_sim_softc(sim));
/*
 * dptexecuteccb: bus_dmamap_load callback for XPT_SCSI_IO.  On map
 * error the devq is frozen and the ccb completed with
 * CAM_REQ_TOO_BIG|CAM_DEV_QFRZN.  Otherwise the DMA segments are
 * copied (byte-swapped) into the DCCB's S/G list (or inlined for a
 * single segment), the buffer is synced, the DCCB is queued on the
 * pending list with its timeout callout armed, and the CP is handed to
 * the controller via EATA_CMD_DMA_SEND_CP.  Caller must hold
 * dpt->lock (asserted).
 * NOTE(review): several lines (declarations, the single- vs
 * multi-segment branch heads, xpt_done calls, block closes) are
 * missing from this extract.
 */
717 dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
719 struct dpt_ccb *dccb;
721 struct dpt_softc *dpt;
723 dccb = (struct dpt_ccb *)arg;
725 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
727 mtx_assert(&dpt->lock, MA_OWNED);
731 device_printf(dpt->dev,
732 "Unexepected error 0x%x returned from "
733 "bus_dmamap_load\n", error);
734 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
735 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
736 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
738 dptfreeccb(dpt, dccb);
745 bus_dma_segment_t *end_seg;
748 end_seg = dm_segs + nseg;
750 /* Copy the segments into our SG list */
752 while (dm_segs < end_seg) {
753 sg->seg_len = htonl(dm_segs->ds_len);
754 sg->seg_addr = htonl(dm_segs->ds_addr);
760 dccb->eata_ccb.scatter = 1;
761 dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr;
762 dccb->eata_ccb.cp_datalen =
763 htonl(nseg * sizeof(dpt_sg_t));
765 dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr;
766 dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len;
769 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
770 op = BUS_DMASYNC_PREREAD;
772 op = BUS_DMASYNC_PREWRITE;
774 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
777 dccb->eata_ccb.cp_dataDMA = 0;
778 dccb->eata_ccb.cp_datalen = 0;
782 * Last time we need to check if this CCB needs to
785 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
787 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
788 dptfreeccb(dpt, dccb);
793 dccb->state |= DCCB_ACTIVE;
794 ccb->ccb_h.status |= CAM_SIM_QUEUED;
795 LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le);
796 callout_reset(&dccb->timer, (ccb->ccb_h.timeout * hz) / 1000,
798 if (dpt_send_eata_command(dpt, &dccb->eata_ccb,
799 dccb->eata_ccb.cp_busaddr,
800 EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) {
801 ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */
803 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
804 dptfreeccb(dpt, dccb);
/*
 * dpt_action: CAM SIM action entry point.  Rejects everything with
 * CAM_NO_HBA when a shutdown is in progress, then dispatches on
 * func_code: XPT_SCSI_IO builds an EATA CP from the csio (CDB <= 12
 * bytes, direction flags, tag message, optional S/G mapping via
 * bus_dmamap_load_ccb -> dptexecuteccb); transfer-settings, geometry,
 * bus-reset and path-inquiry requests are answered inline; everything
 * else gets CAM_REQ_INVALID.  Caller must hold dpt->lock (asserted).
 * NOTE(review): many lines (xpt_done calls, break statements, brace
 * closes, some assignments) are missing from this extract.
 */
810 dpt_action(struct cam_sim *sim, union ccb *ccb)
812 struct dpt_softc *dpt;
814 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n"));
816 dpt = (struct dpt_softc *)cam_sim_softc(sim);
817 mtx_assert(&dpt->lock, MA_OWNED);
819 if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) {
820 xpt_print_path(ccb->ccb_h.path);
821 printf("controller is shutdown. Aborting CCB.\n");
822 ccb->ccb_h.status = CAM_NO_HBA;
827 switch (ccb->ccb_h.func_code) {
828 /* Common cases first */
829 case XPT_SCSI_IO: /* Execute the requested I/O operation */
831 struct ccb_scsiio *csio;
832 struct ccb_hdr *ccbh;
833 struct dpt_ccb *dccb;
834 struct eata_ccb *eccb;
838 /* Max CDB length is 12 bytes */
839 if (csio->cdb_len > 12) {
840 ccb->ccb_h.status = CAM_REQ_INVALID;
844 if ((dccb = dptgetccb(dpt)) == NULL) {
845 dpt->resource_shortage = 1;
846 xpt_freeze_simq(sim, /*count*/1);
847 ccb->ccb_h.status = CAM_REQUEUE_REQ;
851 eccb = &dccb->eata_ccb;
853 /* Link dccb and ccb so we can find one from the other */
855 ccb->ccb_h.ccb_dccb_ptr = dccb;
856 ccb->ccb_h.ccb_dpt_ptr = dpt;
859 * Explicitly set all flags so that the compiler can
860 * be smart about setting them.
862 eccb->SCSI_Reset = 0;
864 eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE)
869 ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)]
871 eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0;
872 eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0;
873 eccb->reqlen = csio->sense_len;
874 eccb->cp_id = ccb->ccb_h.target_id;
875 eccb->cp_channel = cam_sim_bus(sim);
876 eccb->cp_LUN = ccb->ccb_h.target_lun;
878 eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
880 eccb->cp_identify = 1;
882 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
883 && csio->tag_action != CAM_TAG_ACTION_NONE) {
884 eccb->cp_msg[0] = csio->tag_action;
885 eccb->cp_msg[1] = dccb->tag;
892 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
893 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
894 bcopy(csio->cdb_io.cdb_ptr,
895 eccb->cp_cdb, csio->cdb_len);
897 /* I guess I could map it in... */
898 ccb->ccb_h.status = CAM_REQ_INVALID;
899 dptfreeccb(dpt, dccb);
904 bcopy(csio->cdb_io.cdb_bytes,
905 eccb->cp_cdb, csio->cdb_len);
908 * If we have any data to send with this command,
909 * map it into bus space.
911 /* Only use S/G if there is a transfer */
912 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
915 error = bus_dmamap_load_ccb(dpt->buffer_dmat,
920 if (error == EINPROGRESS) {
922 * So as to maintain ordering,
923 * freeze the controller queue
924 * until our mapping is
927 xpt_freeze_simq(sim, 1);
928 dccb->state |= CAM_RELEASE_SIMQ;
933 * Does it want them both on or both off?
934 * CAM_DIR_NONE is both on, so this code can
935 * be removed if this is also what the DPT
940 dptexecuteccb(dccb, NULL, 0, 0);
944 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
945 case XPT_ABORT: /* Abort the specified CCB */
947 ccb->ccb_h.status = CAM_REQ_INVALID;
950 case XPT_SET_TRAN_SETTINGS:
952 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
956 case XPT_GET_TRAN_SETTINGS:
957 /* Get default/user set transfer settings for the target */
959 struct ccb_trans_settings *cts = &ccb->cts;
960 struct ccb_trans_settings_scsi *scsi =
961 &cts->proto_specific.scsi;
962 struct ccb_trans_settings_spi *spi =
963 &cts->xport_specific.spi;
965 cts->protocol = PROTO_SCSI;
966 cts->protocol_version = SCSI_REV_2;
967 cts->transport = XPORT_SPI;
968 cts->transport_version = 2;
970 if (cts->type == CTS_TYPE_USER_SETTINGS) {
971 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
972 spi->bus_width = (dpt->max_id > 7)
973 ? MSG_EXT_WDTR_BUS_8_BIT
974 : MSG_EXT_WDTR_BUS_16_BIT;
975 spi->sync_period = 25; /* 10MHz */
976 if (spi->sync_period != 0)
977 spi->sync_offset = 15;
978 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
980 spi->valid = CTS_SPI_VALID_SYNC_RATE
981 | CTS_SPI_VALID_SYNC_OFFSET
982 | CTS_SPI_VALID_BUS_WIDTH
983 | CTS_SPI_VALID_DISC;
984 scsi->valid = CTS_SCSI_VALID_TQ;
985 ccb->ccb_h.status = CAM_REQ_CMP;
987 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
992 case XPT_CALC_GEOMETRY:
995 * XXX Use Adaptec translation until I find out how to
996 * get this information from the card.
998 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1002 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1005 ccb->ccb_h.status = CAM_REQ_CMP;
1009 case XPT_TERM_IO: /* Terminate the I/O process */
1011 ccb->ccb_h.status = CAM_REQ_INVALID;
1014 case XPT_PATH_INQ: /* Path routing inquiry */
1016 struct ccb_pathinq *cpi = &ccb->cpi;
1018 cpi->version_num = 1;
1019 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1020 if (dpt->max_id > 7)
1021 cpi->hba_inquiry |= PI_WIDE_16;
1022 cpi->target_sprt = 0;
1024 cpi->hba_eng_cnt = 0;
1025 cpi->max_target = dpt->max_id;
1026 cpi->max_lun = dpt->max_lun;
1027 cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)];
1028 cpi->bus_id = cam_sim_bus(sim);
1029 cpi->base_transfer_speed = 3300;
1030 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1031 strncpy(cpi->hba_vid, "DPT", HBA_IDLEN);
1032 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1033 cpi->unit_number = cam_sim_unit(sim);
1034 cpi->transport = XPORT_SPI;
1035 cpi->transport_version = 2;
1036 cpi->protocol = PROTO_SCSI;
1037 cpi->protocol_version = SCSI_REV_2;
1038 cpi->ccb_h.status = CAM_REQ_CMP;
1043 ccb->ccb_h.status = CAM_REQ_INVALID;
1050 * This routine will try to send an EATA command to the DPT HBA.
1051 * It will, by default, try 20,000 times, waiting 50us between tries.
1052 * It returns 0 on success and 1 on failure.
/*
 * Spins on HA_RAUXSTAT until the HBA is not busy (bounded by
 * `retries`), optionally records min/max spin counts when
 * DPT_MEASURE_PERFORMANCE is defined, byte-swaps the CP bus address on
 * big-endian hosts, writes the address to HA_WDMAADDR, and finally
 * writes the command (plus immediate-mode code/code2/ifc registers
 * when applicable) to HA_WCOMMAND.
 * NOTE(review): several lines (DELAY between polls, #endif lines,
 * returns, block closes) are missing from this extract.
 */
1055 dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
1056 u_int32_t cmd_busaddr, u_int command, u_int retries,
1057 u_int ifc, u_int code, u_int code2)
1065 * I hate this polling nonsense. Wish there was a way to tell the DPT
1066 * to go get commands at its own pace, or to interrupt when ready.
1067 * In the mean time we will measure how many itterations it really
1070 for (loop = 0; loop < retries; loop++) {
1071 if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0)
1077 if (loop < retries) {
1078 #ifdef DPT_MEASURE_PERFORMANCE
1079 if (loop > dpt->performance.max_eata_tries)
1080 dpt->performance.max_eata_tries = loop;
1082 if (loop < dpt->performance.min_eata_tries)
1083 dpt->performance.min_eata_tries = loop;
1086 #ifdef DPT_MEASURE_PERFORMANCE
1087 ++dpt->performance.command_too_busy;
1092 /* The controller is alive, advance the wedge timer */
1093 #ifdef DPT_RESET_HBA
1094 dpt->last_contact = microtime_now;
1097 if (cmd_block == NULL)
1099 #if (BYTE_ORDER == BIG_ENDIAN)
1101 cmd_busaddr = ((cmd_busaddr >> 24) & 0xFF)
1102 | ((cmd_busaddr >> 16) & 0xFF)
1103 | ((cmd_busaddr >> 8) & 0xFF)
1104 | (cmd_busaddr & 0xFF)
1107 /* And now the address */
1108 dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr);
1110 if (command == EATA_CMD_IMMEDIATE) {
1111 if (cmd_block == NULL) {
1112 dpt_outb(dpt, HA_WCODE2, code2);
1113 dpt_outb(dpt, HA_WCODE, code);
1115 dpt_outb(dpt, HA_WIFC, ifc);
1117 dpt_outb(dpt, HA_WCOMMAND, command);
1123 /* ==================== Exported Function definitions =======================*/
/*
 * dpt_alloc: first-stage softc initialization — mutex, free/pending
 * CCB lists, per-channel reset level, and (optionally) the
 * performance counters.
 */
1125 dpt_alloc(device_t dev)
1127 dpt_softc_t *dpt = device_get_softc(dev);
1130 mtx_init(&dpt->lock, "dpt", NULL, MTX_DEF);
1131 SLIST_INIT(&dpt->free_dccb_list);
1132 LIST_INIT(&dpt->pending_ccb_list);
1133 for (i = 0; i < MAX_CHANNELS; i++)
1134 dpt->resetlevel[i] = DPT_HA_OK;
1136 #ifdef DPT_MEASURE_PERFORMANCE
1137 dpt_reset_performance(dpt);
1138 #endif /* DPT_MEASURE_PERFORMANCE */
/*
 * dpt_free: tear down DMA resources in reverse order of creation,
 * gated by init_level (a fall-through switch in the canonical source),
 * then destroy the softc mutex.
 * NOTE(review): case labels and some cleanup lines are missing from
 * this extract.
 */
1143 dpt_free(struct dpt_softc *dpt)
1145 switch (dpt->init_level) {
1148 bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap);
1150 bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs,
1152 bus_dmamap_destroy(dpt->dccb_dmat, dpt->dccb_dmamap);
1154 bus_dma_tag_destroy(dpt->dccb_dmat);
1156 bus_dma_tag_destroy(dpt->buffer_dmat);
1159 struct sg_map_node *sg_map;
1161 while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) {
1162 SLIST_REMOVE_HEAD(&dpt->sg_maps, links);
1163 bus_dmamap_unload(dpt->sg_dmat,
1165 bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr,
1167 free(sg_map, M_DEVBUF);
1169 bus_dma_tag_destroy(dpt->sg_dmat);
1174 mtx_destroy(&dpt->lock);
/*
 * dpt_alloc_resources: claim the I/O-space and IRQ bus resources for
 * the device; prints a diagnostic and (in the canonical source)
 * returns an error if either allocation fails.
 */
1178 dpt_alloc_resources (device_t dev)
1183 dpt = device_get_softc(dev);
1185 dpt->io_res = bus_alloc_resource_any(dev, dpt->io_type, &dpt->io_rid,
1187 if (dpt->io_res == NULL) {
1188 device_printf(dev, "No I/O space?!\n");
1193 dpt->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dpt->irq_rid,
1195 if (dpt->irq_res == NULL) {
1196 device_printf(dev, "No IRQ!\n");
/*
 * dpt_release_resources: inverse of dpt_alloc_resources — tear down
 * the interrupt hook and release I/O, IRQ and DRQ bus resources.
 * NOTE(review): the NULL guards around each release are missing from
 * this extract.
 */
1208 dpt_release_resources (device_t dev)
1210 struct dpt_softc * dpt;
1212 dpt = device_get_softc(dev);
1215 bus_teardown_intr(dev, dpt->irq_res, dpt->ih);
1217 bus_release_resource(dev, dpt->io_type, dpt->io_rid, dpt->io_res);
1219 bus_release_resource(dev, SYS_RES_IRQ, dpt->irq_rid, dpt->irq_res);
1221 bus_release_resource(dev, SYS_RES_DRQ, dpt->drq_rid, dpt->drq_res);
/*
 * Widths of the fixed-size dpt_inq_t string fields; used by dpt_init()
 * to trim trailing spaces from the inquiry data.
 */
1226 static u_int8_t string_sizes[] =
1228 sizeof(((dpt_inq_t*)NULL)->vendor),
1229 sizeof(((dpt_inq_t*)NULL)->modelNum),
1230 sizeof(((dpt_inq_t*)NULL)->firmware),
1231 sizeof(((dpt_inq_t*)NULL)->protocol),
/*
 * dpt_init: main controller bring-up.
 *  1. Create the S/G DMA tag and a bootstrap S/G page that doubles as
 *     status packet + scratch CCB + data area for the config commands.
 *  2. Fetch the EATA config page (0xc1) and the board inquiry data,
 *     then probe the cache configuration.
 *  3. Decode the config: EATA revision from splen, max id/lun, IRQ,
 *     DMA channel, channel count, queue depth (capped at 256 because
 *     tags are single-byte), host IDs, S/G entry count.
 *  4. Create the buffer and DCCB DMA tags, allocate/map the DCCB
 *     array + status packet, and seed the free-CCB pool.
 *  5. Trim inquiry strings and print the banner.
 * NOTE(review): numerous lines (declarations, error labels/returns,
 * some tag-create arguments, case labels) are missing from this
 * extract relative to the embedded numbering.
 */
1235 dpt_init(struct dpt_softc *dpt)
1238 struct sg_map_node *sg_map;
1245 dpt->init_level = 0;
1246 SLIST_INIT(&dpt->sg_maps);
1247 mtx_lock(&dpt->lock);
1249 #ifdef DPT_RESET_BOARD
1250 device_printf(dpt->dev, "resetting HBA\n");
1251 dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
1253 /* XXX Shouldn't we poll a status register or something??? */
1255 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1256 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1259 /* lowaddr */ BUS_SPACE_MAXADDR,
1260 /* highaddr */ BUS_SPACE_MAXADDR,
1262 /* filterarg */ NULL,
1263 /* maxsize */ PAGE_SIZE,
1265 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1267 /* lockfunc */ NULL,
1269 &dpt->sg_dmat) != 0) {
1276 * We allocate our DPT ccbs as a contiguous array of bus dma'able
1277 * memory. To get the allocation size, we need to know how many
1278 * ccbs the card supports. This requires a ccb. We solve this
1279 * chicken and egg problem by allocating some re-usable S/G space
1280 * up front, and treating it as our status packet, CCB, and target
1281 * memory space for these commands.
1283 sg_map = dptallocsgmap(dpt);
1287 dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
1288 dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
1289 bzero(dccb, sizeof(*dccb));
1290 dpt->sp_physaddr = sg_map->sg_physaddr;
1291 dccb->eata_ccb.cp_dataDMA =
1292 htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
1293 dccb->eata_ccb.cp_busaddr = ~0;
1294 dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
1295 dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
1296 + offsetof(struct dpt_ccb, sense_data));
1298 /* Okay. Fetch our config */
1299 bzero(&dccb[1], sizeof(conf)); /* data area */
1300 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1301 sizeof(conf), 0xc1, 7, 1);
1304 device_printf(dpt->dev, "Failed to get board configuration\n");
1307 bcopy(&dccb[1], &conf, sizeof(conf));
1309 bzero(&dccb[1], sizeof(dpt->board_data));
1310 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1311 sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
1313 device_printf(dpt->dev, "Failed to get inquiry information\n");
1316 bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));
1318 dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1319 (u_int8_t *)&dccb[1]);
/* splen (status-packet length) distinguishes the EATA revisions. */
1321 switch (ntohl(conf.splen)) {
1323 dpt->EATA_revision = 'a';
1326 dpt->EATA_revision = 'b';
1329 dpt->EATA_revision = 'c';
1332 dpt->EATA_revision = 'z';
1335 dpt->EATA_revision = '?';
1338 dpt->max_id = conf.MAX_ID;
1339 dpt->max_lun = conf.MAX_LUN;
1340 dpt->irq = conf.IRQ;
1341 dpt->dma_channel = (8 - conf.DMA_channel) & 7;
1342 dpt->channels = conf.MAX_CHAN + 1;
1343 dpt->state |= DPT_HA_OK;
1345 dpt->primary = FALSE;
1347 dpt->primary = TRUE;
1349 dpt->more_support = conf.MORE_support;
1351 if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
1352 dpt->immediate_support = 1;
1354 dpt->immediate_support = 0;
1356 dpt->broken_INQUIRY = FALSE;
1358 dpt->cplen = ntohl(conf.cplen);
1359 dpt->cppadlen = ntohs(conf.cppadlen);
1360 dpt->max_dccbs = ntohs(conf.queuesiz);
1362 if (dpt->max_dccbs > 256) {
1363 device_printf(dpt->dev, "Max CCBs reduced from %d to "
1364 "256 due to tag algorithm\n", dpt->max_dccbs);
1365 dpt->max_dccbs = 256;
1368 dpt->hostid[0] = conf.scsi_id0;
1369 dpt->hostid[1] = conf.scsi_id1;
1370 dpt->hostid[2] = conf.scsi_id2;
1375 dpt->sgsize = ntohs(conf.SGsiz);
1377 /* We can only get 64k buffers, so don't bother to waste space. */
1378 if (dpt->sgsize < 17 || dpt->sgsize > 32)
1381 if (dpt->sgsize > dpt_max_segs)
1382 dpt->sgsize = dpt_max_segs;
1384 /* DMA tag for mapping buffers into device visible space. */
1385 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1388 /* lowaddr */ BUS_SPACE_MAXADDR,
1389 /* highaddr */ BUS_SPACE_MAXADDR,
1391 /* filterarg */ NULL,
1392 /* maxsize */ MAXBSIZE,
1393 /* nsegments */ dpt->sgsize,
1394 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1395 /* flags */ BUS_DMA_ALLOCNOW,
1396 /* lockfunc */ busdma_lock_mutex,
1397 /* lockarg */ &dpt->lock,
1398 &dpt->buffer_dmat) != 0) {
1399 device_printf(dpt->dev,
1400 "bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
1406 /* DMA tag for our ccb structures and interrupt status packet */
1407 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1410 /* lowaddr */ BUS_SPACE_MAXADDR,
1411 /* highaddr */ BUS_SPACE_MAXADDR,
1413 /* filterarg */ NULL,
1414 /* maxsize */ (dpt->max_dccbs *
1415 sizeof(struct dpt_ccb)) +
1418 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1420 /* lockfunc */ NULL,
1422 &dpt->dccb_dmat) != 0) {
1423 device_printf(dpt->dev,
1424 "bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
1430 /* Allocation for our ccbs and interrupt status packet */
1431 if (bus_dmamem_alloc(dpt->dccb_dmat, (void **)&dpt->dpt_dccbs,
1432 BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
1433 device_printf(dpt->dev,
1434 "bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
1440 /* And permanently map them */
1441 bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
1443 (dpt->max_dccbs * sizeof(struct dpt_ccb))
1445 dptmapmem, &dpt->dpt_ccb_busbase, /*flags*/0);
1447 /* Clear them out. */
1448 bzero(dpt->dpt_dccbs,
1449 (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));
1451 dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;
/* The real status packet lives just past the DCCB array. */
1453 dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
1454 dpt->sp_physaddr = dpt->dpt_ccb_busbase
1455 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
1458 /* Allocate our first batch of ccbs */
1459 if (dptallocccbs(dpt) == 0) {
1460 device_printf(dpt->dev, "dptallocccbs(dpt) == 0\n");
1461 mtx_unlock(&dpt->lock);
1465 /* Prepare for Target Mode */
1466 dpt->target_mode_enabled = 1;
1468 /* Nuke excess spaces from inquiry information */
1469 strp = dpt->board_data.vendor;
1470 for (i = 0; i < sizeof(string_sizes); i++) {
1471 index = string_sizes[i] - 1;
1472 while (index && (strp[index] == ' '))
1473 strp[index--] = '\0';
1474 strp += string_sizes[i];
1477 device_printf(dpt->dev, "%.8s %.16s FW Rev. %.4s, ",
1478 dpt->board_data.vendor,
1479 dpt->board_data.modelNum, dpt->board_data.firmware);
1481 printf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");
1483 if (dpt->cache_type != DPT_NO_CACHE
1484 && dpt->cache_size != 0) {
1485 printf("%s Cache, ",
1486 dpt->cache_type == DPT_CACHE_WRITETHROUGH
1487 ? "Write-Through" : "Write-Back");
1490 printf("%d CCBs\n", dpt->max_dccbs);
1491 mtx_unlock(&dpt->lock);
1495 mtx_unlock(&dpt->lock);
/*
 * dpt_attach: create one CAM device queue sized to this adapter's CCB
 * count, then register a SIM and a wildcard path for every SCSI channel.
 * Finally hook dptshutdown() on shutdown_final so the HBA cache is
 * flushed before power-off.
 * NOTE(review): several lines (error returns, loop close, return value)
 * are elided in this view of the file.
 */
1500 dpt_attach(dpt_softc_t *dpt)
1502 struct cam_devq *devq;
1506 * Create the device queue for our SIM.
1508 devq = cam_simq_alloc(dpt->max_dccbs);
1512 mtx_lock(&dpt->lock);
1513 for (i = 0; i < dpt->channels; i++) {
1515 * Construct our SIM entry
1517 dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt",
1518 dpt, device_get_unit(dpt->dev), &dpt->lock,
1520 /*tagged*/dpt->max_dccbs, devq);
1521 if (dpt->sims[i] == NULL) {
/* SIM allocation failed; release the devq ourselves. */
1523 cam_simq_free(devq);
1525 printf( "%s(): Unable to attach bus %d "
1526 "due to resource shortage\n",
/* free_devq only for bus 0 — the first SIM owns the shared devq. */
1531 if (xpt_bus_register(dpt->sims[i], dpt->dev, i) != CAM_SUCCESS){
1532 cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1533 dpt->sims[i] = NULL;
1537 if (xpt_create_path(&dpt->paths[i], /*periph*/NULL,
1538 cam_sim_path(dpt->sims[i]),
1539 CAM_TARGET_WILDCARD,
1540 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1541 xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
1542 cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1543 dpt->sims[i] = NULL;
1548 mtx_unlock(&dpt->lock);
/* Warn the controller of power loss at shutdown (see dptshutdown). */
1550 EVENTHANDLER_REGISTER(shutdown_final, dptshutdown,
1551 dpt, SHUTDOWN_PRI_DEFAULT);
/*
 * dpt_detach: tear down CAM state for every channel (async notification,
 * path, bus registration, SIM), give the controller its power-loss
 * warning via dptshutdown(), then release bus/IRQ/DMA resources.
 */
1556 dpt_detach (device_t dev)
1558 struct dpt_softc * dpt;
1561 dpt = device_get_softc(dev);
1563 mtx_lock(&dpt->lock);
1564 for (i = 0; i < dpt->channels; i++) {
/* Notify consumers the devices are going away before freeing paths. */
1566 xpt_async(AC_LOST_DEVICE, dpt->paths[i], NULL);
1568 xpt_free_path(dpt->paths[i]);
1569 xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
1570 cam_sim_free(dpt->sims[i], /*free_devq*/TRUE);
1572 mtx_unlock(&dpt->lock);
/* Flush the HBA cache exactly as the shutdown_final handler would. */
1574 dptshutdown((void *)dpt, SHUTDOWN_PRI_DEFAULT);
1576 dpt_release_resources(dev);
1584 * This is the interrupt handler for the DPT driver.
/*
 * Thin locking wrapper: acquire the softc lock and let
 * dpt_intr_locked() do the actual interrupt servicing.
 * (The function prologue is elided in this view.)
 */
1592 mtx_lock(&dpt->lock);
1593 dpt_intr_locked(dpt);
1594 mtx_unlock(&dpt->lock);
/*
 * dpt_intr_locked: service completion interrupts with the softc lock
 * held.  For each asserted interrupt: validate the status packet's CCB
 * bus address, snapshot the status fields, clear the interrupt by
 * reading HA_RSTATUS, sync/unload DMA maps, translate the result into
 * CAM status, and complete or hand off the request.
 * NOTE(review): many lines (loop close, error branches, ccb setup) are
 * elided in this view.
 */
1598 dpt_intr_locked(dpt_softc_t *dpt)
1606 u_int32_t residue_len; /* Number of bytes not transferred */
1608 /* First order of business is to check if this interrupt is for us */
1609 while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {
1612 * What we want to do now, is to capture the status, all of it,
1613 * move it where it belongs, wake up whoever sleeps waiting to
1614 * process this result, and get out of here.
/* Reject status packets whose CCB address is outside our CCB pool. */
1616 if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
1617 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
1618 device_printf(dpt->dev,
1619 "Encountered bogus status packet\n");
/* Reading HA_RSTATUS acknowledges (clears) the interrupt. */
1620 status = dpt_inb(dpt, HA_RSTATUS);
1624 dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);
/* Poison the address so a replayed packet is caught above. */
1626 dpt->sp->ccb_busaddr = ~0;
1628 /* Ignore status packets with EOC not set */
1629 if (dpt->sp->EOC == 0) {
1630 device_printf(dpt->dev,
1631 "ERROR: Request %d received with "
1632 "clear EOC.\n Marking as LOST.\n",
1633 dccb->transaction_id);
1635 /* This CLEARS the interrupt! */
1636 status = dpt_inb(dpt, HA_RSTATUS);
1642 * Double buffer the status information so the hardware can
1643 * work on updating the status packet while we decipher the
1644 * one we were just interrupted for.
1645 * According to Mark Salyzyn, we only need few pieces of it.
1647 hba_stat = dpt->sp->hba_stat;
1648 scsi_stat = dpt->sp->scsi_stat;
1649 residue_len = dpt->sp->residue_len;
1651 /* Clear interrupts, check for error */
1652 if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
1654 * Error Condition. Check for magic cookie. Exit
1655 * this test on earliest sign of non-reset condition
1658 /* Check that this is not a board reset interrupt */
1659 if (dpt_just_reset(dpt)) {
1660 device_printf(dpt->dev, "HBA rebooted.\n"
1661 " All transactions should be "
1664 device_printf(dpt->dev,
1665 ">>---->> This is incomplete, "
1666 "fix me.... <<----<<");
1667 panic("DPT Rebooted");
/* Command completed: cancel its watchdog and tear down DMA state. */
1673 callout_stop(&dccb->timer);
1674 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1675 bus_dmasync_op_t op;
1677 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1678 op = BUS_DMASYNC_POSTREAD;
1680 op = BUS_DMASYNC_POSTWRITE;
1681 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
1682 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
1685 /* Common Case inline... */
1686 if (hba_stat == HA_NO_ERROR) {
1687 ccb->csio.scsi_status = scsi_stat;
1688 ccb->ccb_h.status = 0;
1689 switch (scsi_stat) {
1690 case SCSI_STATUS_OK:
1691 ccb->ccb_h.status |= CAM_REQ_CMP;
1693 case SCSI_STATUS_CHECK_COND:
1694 case SCSI_STATUS_CMD_TERMINATED:
/* Auto-request-sense data was gathered by the HBA; pass it up. */
1695 bcopy(&dccb->sense_data, &ccb->csio.sense_data,
1696 ccb->csio.sense_len);
1697 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1700 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1701 /* XXX Freeze DevQ */
1704 ccb->csio.resid = residue_len;
1705 dptfreeccb(dpt, dccb);
/* Non-trivial errors go through the full translation table. */
1708 dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
/*
 * dptprocesserror: translate an EATA controller status (hba_stat) and
 * SCSI status into the corresponding CAM status on the ccb, record the
 * residual byte count, then return the dccb to the free pool.
 * NOTE(review): some switch scaffolding (switch head, breaks, several
 * case labels) is elided in this view.
 */
1715 dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
1716 u_int hba_stat, u_int scsi_stat, u_int32_t resid)
1718 ccb->csio.resid = resid;
1721 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1724 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1726 case HA_SCSIBUS_RESET:
1727 case HA_HBA_POWER_UP: /* Similar effect to a bus reset??? */
1728 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1731 case HA_CP_RESET: /* XXX ??? */
1732 case HA_CP_ABORT_NA: /* XXX ??? */
1733 case HA_CP_RESET_NA: /* XXX ??? */
/* Only mark aborted if no more specific status was set already. */
1734 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
1735 ccb->ccb_h.status = CAM_REQ_ABORTED;
1740 case HA_PCI_STABORT:
1744 ccb->ccb_h.status = CAM_UNCOR_PARITY;
1746 case HA_UNX_MSGRJCT:
1747 ccb->ccb_h.status = CAM_MSG_REJECT_REC;
1749 case HA_UNX_BUSPHASE:
1750 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1752 case HA_UNX_BUS_FREE:
1753 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1756 case HA_RESET_STUCK:
1758 * Dead??? Can the controller get unstuck
1759 * from these conditions
1761 ccb->ccb_h.status = CAM_NO_HBA;
1763 case HA_RSENSE_FAIL:
1764 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
/* Unknown hba_stat: report it loudly and fail the request. */
1767 device_printf(dpt->dev, "Undocumented Error %x\n", hba_stat);
1768 printf("Please mail this message to shimon@simon-shapiro.org\n");
1769 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1772 dptfreeccb(dpt, dccb);
/*
 * dpttimeout: per-CCB watchdog callout.  First poll the controller via
 * dpt_intr_locked() in case the completion interrupt was simply lost;
 * if the CCB is genuinely still active, issue an EATA_SPECIFIC_ABORT
 * for it and fail it with CAM_CMD_TIMEOUT.
 * NOTE(review): declaration of `ccb' and the early-return path are
 * elided in this view.
 */
1777 dpttimeout(void *arg)
1779 struct dpt_ccb *dccb;
1781 struct dpt_softc *dpt;
1783 dccb = (struct dpt_ccb *)arg;
1785 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
1786 mtx_assert(&dpt->lock, MA_OWNED);
1787 xpt_print_path(ccb->ccb_h.path);
1788 printf("CCB %p - timed out\n", (void *)dccb);
1791 * Try to clear any pending jobs. FreeBSD will lose interrupts,
1792 * leaving the controller suspended, and commands timed-out.
1793 * By calling the interrupt handler, any command thus stuck will be
1796 dpt_intr_locked(dpt);
/* The poll above may have completed this CCB; don't abort it twice. */
1798 if ((dccb->state & DCCB_ACTIVE) == 0) {
1799 xpt_print_path(ccb->ccb_h.path);
1800 printf("CCB %p - timed out CCB already completed\n",
1805 /* Abort this particular command. Leave all others running */
1806 dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
1807 /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
1808 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1812 * Shutdown the controller and ensure that the cache is completely flushed.
1813 * Called from the shutdown_final event after all disk access has completed.
1816 dptshutdown(void *arg, int howto)
1820 dpt = (dpt_softc_t *)arg;
1822 device_printf(dpt->dev,
1823 "Shutting down (mode %x) HBA. Please wait...\n", howto);
1826 * What we do for a shutdown, is give the DPT early power loss warning
1828 mtx_lock(&dpt->lock);
1829 dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
1830 mtx_unlock(&dpt->lock);
/* Busy-wait 5 seconds to let the controller flush its write cache. */
1831 DELAY(1000 * 1000 * 5);
1832 device_printf(dpt->dev, "Controller was warned of shutdown and is now "
1836 /*============================================================================*/
1839 #ifdef DPT_RESET_HBA
1842 ** Function name : dpt_reset_hba
1844 ** Description : Reset the HBA and properly discard all pending work
/*
 * NOTE(review): this block is compiled only under DPT_RESET_HBA and
 * still references the pre-CAM `struct scsi_xfer' interface; it appears
 * to be legacy code.  Several lines are elided in this view.
 */
1849 dpt_reset_hba(dpt_softc_t *dpt)
1852 dpt_ccb_t dccb, *dccbp;
1854 struct scsi_xfer *xs;
1856 mtx_assert(&dpt->lock, MA_OWNED);
1858 /* Prepare a control block. The SCSI command part is immaterial */
1861 dccb.state = DPT_CCB_STATE_NEW;
1862 dccb.std_callback = NULL;
1863 dccb.wrbuff_callback = NULL;
1865 ccb = &dccb.eata_ccb;
1866 ccb->CP_OpCode = EATA_CMD_RESET;
1867 ccb->SCSI_Reset = 0;
1869 ccb->Auto_Req_Sen = 1;
1870 ccb->cp_id = 0; /* Should be ignored */
1874 ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
1875 ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
1876 ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
1877 ccb->cp_viraddr = (u_int32_t) & ccb;
1879 ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1880 ccb->cp_scsi_cmd = 0; /* Should be ignored */
1882 /* Lock up the submitted queue. We are very persistent here */
1883 while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
1887 dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;
1889 /* Send the RESET message */
1890 if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1891 EATA_CMD_RESET, 0, 0, 0, 0)) != 0) {
1892 device_printf(dpt->dev, "Failed to send the RESET message.\n"
1893 " Trying cold boot (ouch!)\n");
/* Warm reset failed: escalate to a cold boot of the controller. */
1896 if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1897 EATA_COLD_BOOT, 0, 0,
1899 panic("%s: Faild to cold boot the HBA\n",
1900 device_get_nameunit(dpt->dev));
1902 #ifdef DPT_MEASURE_PERFORMANCE
1903 dpt->performance.cold_boots++;
1904 #endif /* DPT_MEASURE_PERFORMANCE */
1907 #ifdef DPT_MEASURE_PERFORMANCE
1908 dpt->performance.warm_starts++;
1909 #endif /* DPT_MEASURE_PERFORMANCE */
1911 device_printf(dpt->dev,
1912 "Aborting pending requests. O/S should re-submit\n");
/* Fail every outstanding transaction back to its originator. */
1914 while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
1915 struct scsi_xfer *xs = dccbp->xs;
1917 /* Not all transactions have xs structs */
1919 /* Tell the kernel proper this did not complete well */
1920 xs->error |= XS_SELTIMEOUT;
1921 xs->flags |= SCSI_ITSDONE;
1925 dpt_Qremove_submitted(dpt, dccbp);
1927 /* Remember, Callbacks are NOT in the standard queue */
1928 if (dccbp->std_callback != NULL) {
1929 (dccbp->std_callback)(dpt, dccbp->eata_ccb.cp_channel,
1932 dpt_Qpush_free(dpt, dccbp);
1936 device_printf(dpt->dev, "reset done aborting all pending commands\n");
1937 dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
1940 #endif /* DPT_RESET_HBA */
/*
 * dpt_target_ccb: build an EATA command block for a target-mode SCSI
 * READ BUFFER / WRITE BUFFER (`command'), pointing its scatter/gather
 * list at the pre-registered rw_buffer for (bus, target, lun).
 * Completion is routed to dpt_target_done().  Called with the softc
 * lock held.
 */
1943 * Build a Command Block for target mode READ/WRITE BUFFER,
1944 * with the ``sync'' bit ON.
1946 * Although the length and offset are 24 bit fields in the command, they cannot
1947 * exceed 8192 bytes, so we take them as short integers and check their range.
1948 * If they are senseless, we round them to zero offset, maximum length and
1953 dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun,
1954 dpt_ccb_t * ccb, int mode, u_int8_t command,
1955 u_int16_t length, u_int16_t offset)
1959 mtx_assert(&dpt->lock, MA_OWNED);
/* Clamp out-of-range requests to a sane window (see header comment). */
1960 if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) {
1961 device_printf(dpt->dev,
1962 "Length of %d, and offset of %d are wrong\n",
1964 length = DPT_MAX_TARGET_MODE_BUFFER_SIZE;
1969 ccb->state = DPT_CCB_STATE_NEW;
1970 ccb->std_callback = (ccb_callback) dpt_target_done;
1971 ccb->wrbuff_callback = NULL;
1973 cp = &ccb->eata_ccb;
1974 cp->CP_OpCode = EATA_CMD_DMA_SEND_CP;
1977 cp->Auto_Req_Sen = 1;
1982 cp->reqlen = htonl(sizeof(struct scsi_sense_data));
1983 cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA));
1984 cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA));
1985 cp->cp_viraddr = (u_int32_t) & ccb;
1987 cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1989 cp->cp_scsi_cmd = command;
1990 cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK);
1991 cp->cp_lun = lun; /* Order is important here! */
1992 cp->cp_cdb[2] = 0x00; /* Buffer Id, only 1 :-( */
/*
 * BUGFIX: CDB bytes 3-5 carry the buffer OFFSET (dpt_target_done
 * decodes them via its br_offset macro, and the comments below say
 * so), yet the original code stored `length' here as well, so every
 * target-mode transfer was issued with a bogus offset.  Bytes 6-8
 * carry the length, as before.
 */
1993 cp->cp_cdb[3] = (offset >> 16) & 0xFF; /* Buffer offset MSB */
1994 cp->cp_cdb[4] = (offset >> 8) & 0xFF;
1995 cp->cp_cdb[5] = offset & 0xFF;
1996 cp->cp_cdb[6] = (length >> 16) & 0xFF; /* Length MSB */
1997 cp->cp_cdb[7] = (length >> 8) & 0xFF;
1998 cp->cp_cdb[8] = length & 0xFF; /* Length LSB */
1999 cp->cp_cdb[9] = 0; /* No sync, no match bits */
2002 * This could be optimized to live in dpt_register_buffer.
2003 * We keep it here, just in case the kernel decides to reallocate pages
2005 if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE,
2006 dpt->rw_buffer[bus][target][lun])) {
2007 device_printf(dpt->dev, "Failed to setup Scatter/Gather for "
2008 "Target-Mode buffer\n");
2012 /* Setup a target mode READ command */
/*
 * dpt_set_target: (re)arm a target-mode READ BUFFER for the given
 * nexus and queue it for submission.  `redo' distinguishes the initial
 * arming from a re-issue after completion; the branch that consumes it
 * is elided in this view — TODO confirm against full source.
 */
2015 dpt_set_target(int redo, dpt_softc_t * dpt,
2016 u_int8_t bus, u_int8_t target, u_int8_t lun, int mode,
2017 u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb)
2020 mtx_assert(&dpt->lock, MA_OWNED);
2021 if (dpt->target_mode_enabled) {
2023 dpt_target_ccb(dpt, bus, target, lun, ccb, mode,
2024 SCSI_TM_READ_BUFFER, length, offset);
2026 ccb->transaction_id = ++dpt->commands_processed;
2028 #ifdef DPT_MEASURE_PERFORMANCE
2029 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
2030 ccb->command_started = microtime_now;
2032 dpt_Qadd_waiting(dpt, ccb);
2033 dpt_sched_queue(dpt);
2035 device_printf(dpt->dev,
2036 "Target Mode Request, but Target Mode is OFF\n");
/*
 * dpt_send_buffer: external entry point — schedule `data' (length bytes
 * at `offset') to be written to (channel, target, lun) with a
 * target-mode WRITE BUFFER command.  `callback' fires on completion.
 * Returns a driver status code (INVALID_UNIT, INVALID_SENDER,
 * NOT_REGISTERED, NO_RESOURCES, DRIVER_DOWN, ...).
 */
2041 * Schedule a buffer to be sent to another target.
2042 * The work will be scheduled and the callback provided will be called when
2043 * the work is actually done.
2045 * Please NOTE: ``Anyone'' can send a buffer, but only registered clients
2046 * get notified of receipt of buffers.
2050 dpt_send_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
2051 u_int8_t mode, u_int16_t length, u_int16_t offset, void *data,
2052 buff_wr_done callback)
2055 dpt_ccb_t *ccb = NULL;
2057 /* This is an external call. Be a bit paranoid */
2058 dpt = devclass_get_device(dpt_devclass, unit);
2060 return (INVALID_UNIT);
2062 mtx_lock(&dpt->lock);
2063 if (dpt->target_mode_enabled) {
2064 if ((channel >= dpt->channels) || (target > dpt->max_id) ||
2065 (lun > dpt->max_lun)) {
2066 mtx_unlock(&dpt->lock);
2067 return (INVALID_SENDER);
2069 if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
2070 (dpt->buffer_receiver[channel][target][lun] == NULL)) {
2071 mtx_unlock(&dpt->lock);
2072 return (NOT_REGISTERED);
2075 /* Process the free list */
2076 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2077 device_printf(dpt->dev,
2078 "ERROR: Cannot allocate any more free CCB's.\n"
2079 " Please try later\n");
2080 mtx_unlock(&dpt->lock);
2081 return (NO_RESOURCES);
2083 /* Now grab the newest CCB */
2084 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2085 mtx_unlock(&dpt->lock);
2086 panic("%s: Got a NULL CCB from pop_free()\n",
2087 device_get_nameunit(dpt->dev));
/*
 * BUGFIX: we are SENDING the caller's data, so it must be copied
 * INTO the registered target-mode buffer that dpt_target_ccb's
 * scatter/gather points at (bcopy(src, dst, len)).  The original
 * call had the arguments reversed, clobbering the caller's buffer
 * and transmitting stale rw_buffer contents instead.
 */
2090 bcopy(data, dpt->rw_buffer[channel][target][lun] + offset, length);
2091 dpt_target_ccb(dpt, channel, target, lun, ccb, mode,
2092 SCSI_TM_WRITE_BUFFER,
2094 ccb->std_callback = (ccb_callback) callback; /* Potential trouble */
2096 ccb->transaction_id = ++dpt->commands_processed;
2098 #ifdef DPT_MEASURE_PERFORMANCE
2099 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
2100 ccb->command_started = microtime_now;
2102 dpt_Qadd_waiting(dpt, ccb);
2103 dpt_sched_queue(dpt);
2105 mtx_unlock(&dpt->lock);
2108 mtx_unlock(&dpt->lock);
2109 return (DRIVER_DOWN);
/*
 * dpt_target_done: completion handler for target-mode READ/WRITE BUFFER
 * CCBs.  Decodes the nexus and the buffer offset/length straight out of
 * the completed CDB (macros below), dispatches to the registered
 * receiver or write-done callback, and re-arms the READ as needed.
 */
2113 dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2117 cp = &ccb->eata_ccb;
2120 * Remove the CCB from the waiting queue.
2121 * We do NOT put it back on the free, etc., queues as it is a special
2122 * ccb, owned by the dpt_softc of this unit.
2124 dpt_Qremove_completed(dpt, ccb);
/* Shorthand for fields of the just-completed command block. */
2126 #define br_channel (ccb->eata_ccb.cp_channel)
2127 #define br_target (ccb->eata_ccb.cp_id)
2128 #define br_lun (ccb->eata_ccb.cp_LUN)
2129 #define br_index [br_channel][br_target][br_lun]
2130 #define read_buffer_callback (dpt->buffer_receiver br_index )
2131 #define read_buffer (dpt->rw_buffer[br_channel][br_target][br_lun])
2132 #define cb(offset) (ccb->eata_ccb.cp_cdb[offset])
/* CDB bytes 3-5 hold the buffer offset, bytes 6-8 the length. */
2133 #define br_offset ((cb(3) << 16) | (cb(4) << 8) | cb(5))
2134 #define br_length ((cb(6) << 16) | (cb(7) << 8) | cb(8))
2136 /* Different reasons for being here, you know... */
2137 switch (ccb->eata_ccb.cp_scsi_cmd) {
2138 case SCSI_TM_READ_BUFFER:
2139 if (read_buffer_callback != NULL) {
2140 /* This is a buffer generated by a kernel process */
2141 read_buffer_callback(device_get_unit(dpt->dev),
2142 br_channel, br_target, br_lun,
2144 br_offset, br_length);
2147 * This is a buffer waited for by a user (sleeping)
2153 /* We ALWAYS re-issue the same command; args are don't-care */
/*
 * NOTE(review): the second argument here is the dpt_softc_t * — a
 * literal 0 (NULL) is passed, yet dpt_set_target dereferences dpt.
 * Verify against full source whether the redo path avoids that.
 */
2154 dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
2157 case SCSI_TM_WRITE_BUFFER:
/* NOTE(review): wrbuff_callback is invoked without a NULL check,
 * unlike std_callback elsewhere — confirm it is always set here. */
2158 (ccb->wrbuff_callback) (device_get_unit(dpt->dev), br_channel,
2159 br_target, br_offset, br_length,
2160 br_lun, ccb->status_packet.hba_stat);
2163 device_printf(dpt->dev,
2164 "%s is an unsupported command for target mode\n",
2165 scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
2167 dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
2168 dpt_Qpush_free(dpt, ccb);
2173 * Use this function to register a client for a buffer read target operation.
2174 * The function you register will be called every time a buffer is received
2175 * by the target mode code.
/*
 * dpt_register_buffer: register (op == REGISTER_BUFFER) or deregister a
 * target-mode buffer receiver for (channel, target, lun).  Registration
 * allocates a dedicated CCB and an rw_buffer and arms the initial READ
 * BUFFER via dpt_set_target().  Returns a driver status code.
 */
2178 dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
2179 u_int8_t mode, u_int16_t length, u_int16_t offset,
2180 dpt_rec_buff callback, dpt_rb_op_t op)
2183 dpt_ccb_t *ccb = NULL;
2186 dpt = devclass_get_device(dpt_devclass, unit);
2188 return (INVALID_UNIT);
2189 mtx_lock(&dpt->lock);
2191 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE) {
2192 mtx_unlock(&dpt->lock);
2193 return (DRIVER_DOWN);
2196 if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) ||
2197 (lun > (dpt->max_lun - 1))) {
2198 mtx_unlock(&dpt->lock);
2199 return (INVALID_SENDER);
/* Nexus is currently unowned: either claim it or reject deregister. */
2202 if (dpt->buffer_receiver[channel][target][lun] == NULL) {
2203 if (op == REGISTER_BUFFER) {
2204 /* Assign the requested callback */
2205 dpt->buffer_receiver[channel][target][lun] = callback;
2208 /* Process the free list */
2209 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2210 device_printf(dpt->dev,
2211 "ERROR: Cannot allocate any more free CCB's.\n"
2212 " Please try later\n");
2213 mtx_unlock(&dpt->lock);
2214 return (NO_RESOURCES);
2216 /* Now grab the newest CCB */
2217 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2218 mtx_unlock(&dpt->lock);
2219 panic("%s: Got a NULL CCB from pop_free()\n",
2220 device_get_nameunit(dpt->dev));
2223 /* Clean up the leftover of the previous tenant */
2224 ccb->status = DPT_CCB_STATE_NEW;
2225 dpt->target_ccb[channel][target][lun] = ccb;
2227 dpt->rw_buffer[channel][target][lun] =
2228 malloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_NOWAIT);
2229 if (dpt->rw_buffer[channel][target][lun] == NULL) {
2230 device_printf(dpt->dev, "Failed to allocate "
2231 "Target-Mode buffer\n");
2232 dpt_Qpush_free(dpt, ccb);
2233 mtx_unlock(&dpt->lock);
2234 return (NO_RESOURCES);
/* Arm the initial target-mode READ for this nexus. */
2236 dpt_set_target(0, dpt, channel, target, lun, mode,
2237 length, offset, ccb);
2238 mtx_unlock(&dpt->lock);
2239 return (SUCCESSFULLY_REGISTERED);
2241 mtx_unlock(&dpt->lock);
2242 return (NOT_REGISTERED);
/* Nexus already owned: report status or perform the deregister. */
2245 if (op == REGISTER_BUFFER) {
2246 if (dpt->buffer_receiver[channel][target][lun] == callback) {
2247 mtx_unlock(&dpt->lock);
2248 return (ALREADY_REGISTERED);
2250 mtx_unlock(&dpt->lock);
2251 return (REGISTERED_TO_ANOTHER);
2254 if (dpt->buffer_receiver[channel][target][lun] == callback) {
2255 dpt->buffer_receiver[channel][target][lun] = NULL;
/*
 * NOTE(review): on this path the local `ccb' is still the NULL it
 * was initialized to (no visible assignment reaches here); this
 * likely should push dpt->target_ccb[channel][target][lun] instead.
 * Verify against full source before changing.
 */
2256 dpt_Qpush_free(dpt, ccb);
2257 free(dpt->rw_buffer[channel][target][lun], M_DEVBUF);
2258 mtx_unlock(&dpt->lock);
2259 return (SUCCESSFULLY_REGISTERED);
2261 mtx_unlock(&dpt->lock);
2262 return (INVALID_CALLBACK);
2267 mtx_unlock(&dpt->lock);
2270 /* Return the state of the blinking DPT LED's */
/*
 * dpt_blinking_led: sample the LED state register up to 10 times until
 * two consecutive reads agree; if the stable value is the blink
 * indicator pattern, return the blink code read from offset 5.
 * NOTE(review): with state and previous both starting at 0, the visible
 * loop condition (state != previous) is false on entry, so the loop
 * body would never run — elided lines may differ; confirm against full
 * source.
 */
2272 dpt_blinking_led(dpt_softc_t * dpt)
2279 mtx_assert(&dpt->lock, MA_OWNED);
2282 for (ndx = 0, state = 0, previous = 0;
2283 (ndx < 10) && (state != previous);
2286 state = dpt_inl(dpt, 1);
2289 if ((state == previous) && (state == DPT_BLINK_INDICATOR))
2290 result = dpt_inb(dpt, 5);
2296 * Execute a command which did not come from the kernel's SCSI layer.
2297 * The only way to map user commands to bus and target is to comply with the
2298 * standard DPT wire-down scheme:
/*
 * dpt_user_cmd: submit a user-space EATA pass-through command (ioctl
 * path).  Decodes the nexus from the device minor number, validates the
 * EATA signature, copies and sanitizes the user's command packet into a
 * driver CCB, handles BUS_QUIET/BUS_UNQUIET multifunction requests, and
 * sleeps until dpt_user_cmd_done() completes the request.  Called with
 * the softc lock held.  NOTE(review): many error-path and cleanup lines
 * are elided in this view.
 */
2301 dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
2302 caddr_t cmdarg, int minor_no)
2306 int channel, target, lun;
2311 mtx_assert(&dpt->lock, MA_OWNED);
/* The minor number encodes the wire-down: channel/target/lun. */
2313 channel = minor2hba(minor_no);
2314 target = minor2target(minor_no);
2315 lun = minor2lun(minor_no);
2317 if ((channel > (dpt->channels - 1))
2318 || (target > dpt->max_id)
2319 || (lun > dpt->max_lun))
2322 if (target == dpt->sc_scsi_link[channel].adapter_targ) {
2323 /* This one is for the controller itself */
2324 if ((user_cmd->eataID[0] != 'E')
2325 || (user_cmd->eataID[1] != 'A')
2326 || (user_cmd->eataID[2] != 'T')
2327 || (user_cmd->eataID[3] != 'A')) {
2331 /* Get a DPT CCB, so we can prepare a command */
2333 /* Process the free list */
2334 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2335 device_printf(dpt->dev,
2336 "ERROR: Cannot allocate any more free CCB's.\n"
2337 " Please try later\n");
2340 /* Now grab the newest CCB */
2341 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2342 panic("%s: Got a NULL CCB from pop_free()\n",
2343 device_get_nameunit(dpt->dev));
2345 /* Clean up the leftover of the previous tenant */
2346 ccb->status = DPT_CCB_STATE_NEW;
2349 bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
2350 sizeof(eata_ccb_t));
2352 /* We do not want to do user specified scatter/gather. Why?? */
2353 if (ccb->eata_ccb.scatter == 1)
/* Override the user's DMA fields with trusted kernel values. */
2356 ccb->eata_ccb.Auto_Req_Sen = 1;
2357 ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
2358 ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
2359 ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
2360 ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
2361 ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
2362 ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;
2364 if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
2365 /* Data I/O is involved in this command. Allocate buffer */
2366 if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
2367 data = contigmalloc(ccb->eata_ccb.cp_datalen,
2368 M_TEMP, M_WAITOK, 0, ~0,
2369 ccb->eata_ccb.cp_datalen,
2372 data = malloc(ccb->eata_ccb.cp_datalen, M_TEMP,
2377 device_printf(dpt->dev, "Cannot allocate %d bytes "
2378 "for EATA command\n",
2379 ccb->eata_ccb.cp_datalen);
2382 #define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
2383 if (ccb->eata_ccb.DataIn == 1) {
2384 if (copyin(usr_cmd_DMA,
2385 data, ccb->eata_ccb.cp_datalen) == -1)
2389 /* No data I/O involved here. Make sure the DPT knows that */
2390 ccb->eata_ccb.cp_datalen = 0;
/* Firmware-nested commands are not permitted from user space. */
2394 if (ccb->eata_ccb.FWNEST == 1)
2395 ccb->eata_ccb.FWNEST = 0;
2397 if (ccb->eata_ccb.cp_datalen != 0) {
2398 if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
2406 * We are required to quiet a SCSI bus.
2407 * since we do not queue commands on a bus basis,
2408 * we wait for ALL commands on a controller to complete.
2409 * In the mean time, sched_queue() will not schedule new commands.
2411 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2412 && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
2413 /* We wait for ALL traffic for this HBA to subside */
2414 dpt->state |= DPT_HA_QUIET;
2416 while ((submitted = dpt->submitted_ccbs_count) != 0) {
2417 huh = mtx_sleep((void *) dpt, &dpt->lock,
2418 PCATCH | PRIBIO, "dptqt", 100 * hz);
2421 /* Wakeup call received */
2432 /* Resume normal operation */
2433 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2434 && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
2435 dpt->state &= ~DPT_HA_QUIET;
2438 * Schedule the command and submit it.
2439 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
2443 ccb->eata_ccb.Auto_Req_Sen = 1; /* We always want this feature */
2445 ccb->transaction_id = ++dpt->commands_processed;
2446 ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
2447 ccb->result = (u_int32_t) & cmdarg;
2450 #ifdef DPT_MEASURE_PERFORMANCE
2451 ++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd];
2452 ccb->command_started = microtime_now;
2454 dpt_Qadd_waiting(dpt, ccb);
2456 dpt_sched_queue(dpt);
2458 /* Wait for the command to complete */
2459 (void) mtx_sleep((void *) ccb, &dpt->lock, PCATCH | PRIBIO, "dptucw",
2462 /* Free allocated memory */
2470 dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2475 mtx_unlock(&dpt->lock);
2478 * If Auto Request Sense is on, copyout the sense struct
2480 #define usr_pckt_DMA (caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA)
2481 #define usr_pckt_len ntohl(ccb->eata_ccb.cp_datalen)
2482 if (ccb->eata_ccb.Auto_Req_Sen == 1) {
2483 if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA,
2484 sizeof(struct scsi_sense_data))) {
2485 mtx_lock(&dpt->lock);
2486 ccb->result = EFAULT;
2487 dpt_Qpush_free(dpt, ccb);
2492 /* If DataIn is on, copyout the data */
2493 if ((ccb->eata_ccb.DataIn == 1)
2494 && (ccb->status_packet.hba_stat == HA_NO_ERROR)) {
2495 if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) {
2496 mtx_lock(&dpt->lock);
2497 dpt_Qpush_free(dpt, ccb);
2498 ccb->result = EFAULT;
2504 /* Copyout the status */
2505 result = ccb->status_packet.hba_stat;
2506 cmd_arg = (caddr_t) ccb->result;
2508 if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) {
2509 mtx_lock(&dpt->lock);
2510 dpt_Qpush_free(dpt, ccb);
2511 ccb->result = EFAULT;
2515 mtx_lock(&dpt->lock);
2516 /* Put the CCB back in the freelist */
2517 ccb->state |= DPT_CCB_STATE_COMPLETED;
2518 dpt_Qpush_free(dpt, ccb);
2520 /* Free allocated memory */