2 * Copyright (c) 1997 by Simon Shapiro
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * dpt_scsi.c: SCSI dependent code for the DPT driver
36 * credits: Assisted by Mike Neuffer in the early low level DPT code
37 * Thanx to Mark Salyzyn of DPT for his assistance.
38 * Special thanx to Justin Gibbs for invaluable help in
39 * making this driver look and work like a FreeBSD component.
40 * Last but not least, many thanx to UCB and the FreeBSD
41 * team for creating and maintaining such a wonderful O/S.
43 * TODO: * Add ISA probe code.
44 * * Add driver-level RAID-0. This will allow interoperability with
45 * NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID
46 * arrays that span controllers (Wow!).
53 #include <sys/param.h>
54 #include <sys/systm.h>
56 #include <sys/eventhandler.h>
57 #include <sys/malloc.h>
58 #include <sys/kernel.h>
62 #include <machine/bus.h>
64 #include <machine/resource.h>
69 #include <cam/cam_ccb.h>
70 #include <cam/cam_sim.h>
71 #include <cam/cam_xpt_sim.h>
72 #include <cam/cam_debug.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
79 #include <dev/dpt/dpt.h>
81 /* dpt_isa.c, and dpt_pci.c need this in a central place */
82 devclass_t dpt_devclass;
84 #define microtime_now dpt_time_now()
86 #define dpt_inl(dpt, port) \
87 bus_read_4((dpt)->io_res, (dpt)->io_offset + port)
88 #define dpt_inb(dpt, port) \
89 bus_read_1((dpt)->io_res, (dpt)->io_offset + port)
90 #define dpt_outl(dpt, port, value) \
91 bus_write_4((dpt)->io_res, (dpt)->io_offset + port, value)
92 #define dpt_outb(dpt, port, value) \
93 bus_write_1((dpt)->io_res, (dpt)->io_offset + port, value)
96 * These will have to be setup by parameters passed at boot/load time. For
97 * performance reasons, we make them constants for the time being.
99 #define dpt_min_segs DPT_MAX_SEGS
100 #define dpt_max_segs DPT_MAX_SEGS
102 /* Definitions for our use of the SIM private CCB area */
103 #define ccb_dccb_ptr spriv_ptr0
104 #define ccb_dpt_ptr spriv_ptr1
106 /* ================= Private Inline Function declarations ===================*/
107 static __inline int dpt_just_reset(dpt_softc_t * dpt);
108 static __inline int dpt_raid_busy(dpt_softc_t * dpt);
109 static __inline int dpt_wait(dpt_softc_t *dpt, u_int bits,
111 static __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt);
112 static __inline void dptfreeccb(struct dpt_softc *dpt,
113 struct dpt_ccb *dccb);
114 static __inline bus_addr_t dptccbvtop(struct dpt_softc *dpt,
115 struct dpt_ccb *dccb);
117 static __inline int dpt_send_immediate(dpt_softc_t *dpt,
118 eata_ccb_t *cmd_block,
119 u_int32_t cmd_busaddr,
121 u_int ifc, u_int code,
124 /* ==================== Private Function declarations =======================*/
125 static void dptmapmem(void *arg, bus_dma_segment_t *segs,
126 int nseg, int error);
128 static struct sg_map_node*
129 dptallocsgmap(struct dpt_softc *dpt);
131 static int dptallocccbs(dpt_softc_t *dpt);
133 static int dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb,
134 u_int32_t dccb_busaddr, u_int size,
135 u_int page, u_int target, int extent);
136 static void dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb,
137 u_int32_t dccb_busaddr,
140 static void dpt_poll(struct cam_sim *sim);
141 static void dpt_intr_locked(dpt_softc_t *dpt);
143 static void dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
144 int nseg, int error);
146 static void dpt_action(struct cam_sim *sim, union ccb *ccb);
148 static int dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd,
149 u_int32_t cmd_busaddr,
150 u_int command, u_int retries,
151 u_int ifc, u_int code,
153 static void dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb,
154 union ccb *ccb, u_int hba_stat,
155 u_int scsi_stat, u_int32_t resid);
157 static void dpttimeout(void *arg);
158 static void dptshutdown(void *arg, int howto);
160 /* ================= Private Inline Function definitions ====================*/
/*
 * dpt_just_reset: returns TRUE when status ports 2..5 read back "DPTH",
 * the signature the HBA presents immediately after a hardware reset.
 * NOTE(review): this extract is missing interleaved lines (return type,
 * braces, return statements) -- verify any edit against the full source.
 */
162 dpt_just_reset(dpt_softc_t * dpt)
164 if ((dpt_inb(dpt, 2) == 'D')
165 && (dpt_inb(dpt, 3) == 'P')
166 && (dpt_inb(dpt, 4) == 'T')
167 && (dpt_inb(dpt, 5) == 'H'))
/*
 * dpt_raid_busy: returns TRUE when ports 0..2 read back "DPT" -- the
 * firmware signature meaning the RAID is still busy/spinning up (see
 * the caller in dpt_get_conf()).
 */
174 dpt_raid_busy(dpt_softc_t * dpt)
176 if ((dpt_inb(dpt, 0) == 'D')
177 && (dpt_inb(dpt, 1) == 'P')
178 && (dpt_inb(dpt, 2) == 'T'))
/*
 * dpt_wait: busy-poll HA_RSTATUS until the masked bits equal `state`.
 * Budget is 20000 iterations (~20ms; presumably 1us delay per spin --
 * the DELAY call is on a dropped line, TODO confirm).
 */
185 dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state)
190 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */
191 c = dpt_inb(dpt, HA_RSTATUS) & bits;
/*
 * dptgetccb: pop a DCCB from the softc free list.  If the list is empty
 * and the pool is still below max_dccbs, the pool is grown first (the
 * growth call sits on dropped line 211 -- presumably dptallocccbs();
 * verify) and the head popped again.  Caller must hold dpt->lock.
 */
200 static __inline struct dpt_ccb*
201 dptgetccb(struct dpt_softc *dpt)
203 struct dpt_ccb* dccb;
206 mtx_assert(&dpt->lock, MA_OWNED);
207 if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) {
208 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
/* Free list empty but pool not yet at its configured ceiling: grow it. */
210 } else if (dpt->total_dccbs < dpt->max_dccbs) {
212 dccb = SLIST_FIRST(&dpt->free_dccb_list);
/* Growth failed: report the allocation failure. */
214 device_printf(dpt->dev, "Can't malloc DCCB\n");
216 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
/*
 * dptfreeccb: return a DCCB to the free list.  An ACTIVE entry is first
 * unlinked from the pending-CCB list; if either the DCCB or the softc
 * recorded a SIMQ freeze, CAM_RELEASE_SIMQ is propagated to the owning
 * CCB so CAM restarts the queue.  Caller must hold dpt->lock.
 */
225 dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb)
229 mtx_assert(&dpt->lock, MA_OWNED);
230 if ((dccb->state & DCCB_ACTIVE) != 0)
231 LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le);
232 if ((dccb->state & DCCB_RELEASE_SIMQ) != 0)
233 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
/* Release the SIMQ at most once for a recorded resource shortage. */
234 else if (dpt->resource_shortage != 0
235 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
236 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
237 dpt->resource_shortage = FALSE;
239 dccb->state = DCCB_FREE;
240 SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links);
/*
 * dptccbvtop: translate a DCCB kernel-virtual address to its bus address
 * using its byte offset within the contiguous dpt_dccbs array.
 */
244 static __inline bus_addr_t
245 dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb)
247 return (dpt->dpt_ccb_busbase
248 + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs));
/*
 * dptccbptov: inverse of dptccbvtop() -- map a DCCB bus address back to
 * its kernel-virtual address via its index into the dpt_dccbs array.
 */
251 static __inline struct dpt_ccb *
252 dptccbptov(struct dpt_softc *dpt, bus_addr_t busaddr)
254 return (dpt->dpt_dccbs
255 + ((struct dpt_ccb *)busaddr
256 - (struct dpt_ccb *)dpt->dpt_ccb_busbase));
260 * Send a command for immediate execution by the DPT
261 * See above function for IMPORTANT notes.
/*
 * dpt_send_immediate: thin wrapper around dpt_send_eata_command() that
 * issues an EATA_CMD_IMMEDIATE request with the given codes.
 */
264 dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
265 u_int32_t cmd_busaddr, u_int retries,
266 u_int ifc, u_int code, u_int code2)
268 return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr,
269 EATA_CMD_IMMEDIATE, retries, ifc,
274 /* ===================== Private Function definitions =======================*/
/*
 * dptmapmem: bus_dmamap_load() callback that records the bus address of
 * the DMA segment into the caller-supplied bus_addr_t.
 * NOTE(review): nseg and error are ignored here; assumes a single-segment
 * load that cannot fail -- confirm against the full source.
 */
276 dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
278 bus_addr_t *busaddrp;
280 busaddrp = (bus_addr_t *)arg;
281 *busaddrp = segs->ds_addr;
/*
 * dptallocsgmap: allocate one PAGE_SIZE chunk of DMA-able scatter/gather
 * space, load it to learn its bus address (via dptmapmem), and link the
 * bookkeeping node onto dpt->sg_maps.  Returns the node, or NULL on
 * allocation failure.
 */
284 static struct sg_map_node *
285 dptallocsgmap(struct dpt_softc *dpt)
287 struct sg_map_node *sg_map;
289 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
294 /* Allocate S/G space for the next batch of CCBS */
295 if (bus_dmamem_alloc(dpt->sg_dmat, (void **)&sg_map->sg_vaddr,
296 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
297 free(sg_map, M_DEVBUF);
301 (void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
302 PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr,
305 SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links);
311 * Allocate another chunk of CCBs. Return count of entries added.
/*
 * dptallocccbs: grow the DCCB pool by one page's worth of entries.
 * Carves a PAGE_SIZE S/G chunk into per-CCB scatter/gather lists
 * (dpt->sgsize entries each), initializes each CCB's static EATA DMA
 * fields (byte-swapped to controller big-endian order via htonl), and
 * pushes each onto the free list.  Returns the count of entries added
 * (per the comment at original line 311).  Caller must hold dpt->lock.
 */
314 dptallocccbs(dpt_softc_t *dpt)
316 struct dpt_ccb *next_ccb;
317 struct sg_map_node *sg_map;
324 mtx_assert(&dpt->lock, MA_OWNED);
325 next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs];
327 if (next_ccb == dpt->dpt_dccbs) {
329 * First time through. Re-use the S/G
330 * space we allocated for initialization
333 sg_map = SLIST_FIRST(&dpt->sg_maps);
335 sg_map = dptallocsgmap(dpt);
341 segs = sg_map->sg_vaddr;
342 physaddr = sg_map->sg_physaddr;
/* How many CCBs' worth of S/G lists fit into one page. */
344 newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t)));
345 for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) {
348 error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0,
352 callout_init_mtx(&next_ccb->timer, &dpt->lock, 0);
353 next_ccb->sg_list = segs;
354 next_ccb->sg_busaddr = htonl(physaddr);
355 next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr);
356 next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
/* Request-sense data lands in the CCB's own sense_data field. */
357 next_ccb->eata_ccb.cp_reqDMA =
358 htonl(dptccbvtop(dpt, next_ccb)
359 + offsetof(struct dpt_ccb, sense_data))
360 next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend;
361 next_ccb->state = DCCB_FREE;
362 next_ccb->tag = dpt->total_dccbs;
363 SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links);
365 physaddr += (dpt->sgsize * sizeof(dpt_sg_t));
366 dpt->dpt_ccb_busend += sizeof(*next_ccb);
374 * Read a configuration page into the supplied dpt_conf_t buffer.
/*
 * dpt_get_conf: read a configuration page from the HBA by issuing an
 * interpreted INQUIRY via EATA_CMD_DMA_SEND_CP, then poll for completion.
 * Returns 0 only when status, hba_stat, scsi_stat and residue are all
 * clean.  Caller must hold dpt->lock; the data lands in the buffer that
 * follows the supplied DCCB.
 */
377 dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
378 u_int size, u_int page, u_int target, int extent)
387 mtx_assert(&dpt->lock, MA_OWNED);
388 cp = &dccb->eata_ccb;
/* Clear the status packet before reuse (note: *dpt->sp, full struct). */
389 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp));
393 cp->Auto_Req_Sen = 1;
394 cp->reqlen = sizeof(struct scsi_sense_data);
397 cp->cp_LUN = 0;	/* In the EATA packet */
398 cp->cp_lun = 0;	/* In the SCSI command */
400 cp->cp_scsi_cmd = INQUIRY;
403 cp->cp_extent = extent;
406 cp->cp_channel = 0;	/* DNC, Interpret mode is set */
408 cp->cp_datalen = htonl(size);
411 * This could be a simple for loop, but we suspected the compiler to
412 * have optimized it a bit too much. Wait for the controller to
415 while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC)
416 && (status != (HA_SREADY | HA_SSC | HA_SERROR))
417 && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ)))
418 || (dpt_wait(dpt, HA_SBUSY, 0))) {
421 * RAID Drives still Spinning up? (This should only occur if
422 * the DPT controller is in a NON PC (PCI?) platform).
424 if (dpt_raid_busy(dpt)) {
425 device_printf(dpt->dev,
426 "WARNING: Get_conf() RSUS failed.\n");
431 DptStat_Reset_BUSY(dpt->sp);
434 * XXXX We might want to do something more clever than aborting at
435 * this point, like resetting (rebooting) the controller and trying
438 if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
439 EATA_CMD_DMA_SEND_CP,
440 10000, 0, 0, 0)) != 0) {
441 device_printf(dpt->dev,
442 "WARNING: Get_conf() failed (%d) to send "
443 "EATA_CMD_DMA_READ_CONFIG\n",
447 /* Wait for two seconds for a response. This can be slow */
450 && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
455 /* Grab the status and clear interrupts */
456 status = dpt_inb(dpt, HA_RSTATUS);
459 * Check the status carefully. Return only if the
460 * command was successful.
462 if (((status & HA_SERROR) == 0)
463 && (dpt->sp->hba_stat == 0)
464 && (dpt->sp->scsi_stat == 0)
465 && (dpt->sp->residue_len == 0))
468 if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND)
474 /* Detect Cache parameters and size */
/*
 * dpt_detect_cache: query the HBA's caching mode and size by sending a
 * LOG SENSE (opcode 0x4d) for vendor page 0x33 and parsing the returned
 * log parameters.  Defaults to write-back; downgrades to no-cache or
 * write-through according to the page 1/6 parameter bits.  Caller must
 * hold dpt->lock.
 */
476 dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
486 mtx_assert(&dpt->lock, MA_OWNED);
489 * Default setting, for best performance..
490 * This is what virtually all cards default to..
492 dpt->cache_type = DPT_CACHE_WRITEBACK;
495 cp = &dccb->eata_ccb;
/*
 * NOTE(review): sizeof(dpt->sp) is the size of the POINTER, not the
 * status packet -- dpt_get_conf() uses sizeof(*dpt->sp) here.  This
 * looks like a real bug (only 4/8 bytes get cleared); confirm and fix
 * against the full source.
 */
496 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(dpt->sp));
499 /* Setup the command structure */
502 cp->Auto_Req_Sen = 1;
503 cp->reqlen = sizeof(struct scsi_sense_data);
505 cp->cp_id = 0; /* who cares? The HBA will interpret.. */
506 cp->cp_LUN = 0; /* In the EATA packet */
507 cp->cp_lun = 0; /* In the SCSI command */
510 cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP;
519 * Build the EATA Command Packet structure
520 * for a Log Sense Command.
522 cp->cp_cdb[0] = 0x4d;
524 cp->cp_cdb[2] = 0x40 | 0x33;
527 cp->cp_datalen = htonl(512);
529 result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
530 EATA_CMD_DMA_SEND_CP,
533 device_printf(dpt->dev,
534 "WARNING: detect_cache() failed (%d) to send "
535 "EATA_CMD_DMA_SEND_CP\n", result);
538 /* Wait for two seconds for a response. This can be slow... */
541 !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
546 /* Grab the status and clear interrupts */
547 status = dpt_inb(dpt, HA_RSTATUS);
/* Sanity-check the returned page code before walking the parameters. */
552 if (buff[0] != 0x33) {
555 bytes = DPT_HCP_LENGTH(buff);
556 param = DPT_HCP_FIRST(buff);
558 if (DPT_HCP_CODE(param) != 1) {
560 * DPT Log Page layout error
562 device_printf(dpt->dev, "NOTICE: Log Page (1) layout error\n");
565 if (!(param[4] & 0x4)) {
566 dpt->cache_type = DPT_NO_CACHE;
/* Walk the parameter list looking for code 6 (cache status). */
569 while (DPT_HCP_CODE(param) != 6) {
570 param = DPT_HCP_NEXT(param);
572 || (param >= &buff[bytes])) {
577 if (param[4] & 0x2) {
581 dpt->cache_type = DPT_NO_CACHE;
585 if (param[4] & 0x4) {
586 dpt->cache_type = DPT_CACHE_WRITETHROUGH;
589 /* XXX This isn't correct. This log parameter only has two bytes.... */
591 dpt->cache_size = param[5]
/*
 * dpt_poll: CAM SIM poll entry point -- service completions by invoking
 * the interrupt handler body directly (lock already held by CAM).
 */
599 dpt_poll(struct cam_sim *sim)
601 dpt_intr_locked(cam_sim_softc(sim));
/*
 * dptexecuteccb: busdma load callback that finishes dispatching a SCSI
 * CCB.  Copies the DMA segments into the DCCB's S/G list (big-endian,
 * via htonl), syncs the map, links the CCB onto the pending list, arms
 * the per-command timeout, and fires the EATA command.  On load error
 * or an already-aborted CCB the DCCB is recycled instead.
 */
605 dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
607 struct dpt_ccb *dccb;
609 struct dpt_softc *dpt;
611 dccb = (struct dpt_ccb *)arg;
613 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
615 mtx_assert(&dpt->lock, MA_OWNED);
/* Mapping failed: fail the CCB with a frozen devq and recycle the DCCB. */
619 device_printf(dpt->dev,
620 "Unexepected error 0x%x returned from "
621 "bus_dmamap_load\n", error);
622 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
623 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
624 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
626 dptfreeccb(dpt, dccb);
633 bus_dma_segment_t *end_seg;
636 end_seg = dm_segs + nseg;
638 /* Copy the segments into our SG list */
640 while (dm_segs < end_seg) {
641 sg->seg_len = htonl(dm_segs->ds_len);
642 sg->seg_addr = htonl(dm_segs->ds_addr);
/* Multi-segment transfer: point the controller at the S/G list. */
648 dccb->eata_ccb.scatter = 1;
649 dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr;
650 dccb->eata_ccb.cp_datalen =
651 htonl(nseg * sizeof(dpt_sg_t));
/* Single segment: inline the (already byte-swapped) addr/len. */
653 dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr;
654 dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len;
657 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
658 op = BUS_DMASYNC_PREREAD;
660 op = BUS_DMASYNC_PREWRITE;
662 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
/* No data phase at all. */
665 dccb->eata_ccb.cp_dataDMA = 0;
666 dccb->eata_ccb.cp_datalen = 0;
670 * Last time we need to check if this CCB needs to
673 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
675 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
676 dptfreeccb(dpt, dccb);
681 dccb->state |= DCCB_ACTIVE;
682 ccb->ccb_h.status |= CAM_SIM_QUEUED;
683 LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le);
684 callout_reset_sbt(&dccb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
685 dpttimeout, dccb, 0);
686 if (dpt_send_eata_command(dpt, &dccb->eata_ccb,
687 dccb->eata_ccb.cp_busaddr,
688 EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) {
689 ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */
691 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
692 dptfreeccb(dpt, dccb);
/*
 * dpt_action: CAM SIM action entry point.  Dispatches on the CCB
 * function code: builds and submits EATA command packets for
 * XPT_SCSI_IO, answers transfer-settings / geometry / path inquiries,
 * and rejects the operations the hardware layer does not support.
 * Called with dpt->lock held by CAM.
 */
698 dpt_action(struct cam_sim *sim, union ccb *ccb)
700 struct dpt_softc *dpt;
702 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n"));
704 dpt = (struct dpt_softc *)cam_sim_softc(sim);
705 mtx_assert(&dpt->lock, MA_OWNED);
/* Refuse all new work once a shutdown has begun. */
707 if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) {
708 xpt_print_path(ccb->ccb_h.path);
709 printf("controller is shutdown. Aborting CCB.\n");
710 ccb->ccb_h.status = CAM_NO_HBA;
715 switch (ccb->ccb_h.func_code) {
716 /* Common cases first */
717 case XPT_SCSI_IO: /* Execute the requested I/O operation */
719 struct ccb_scsiio *csio;
720 struct ccb_hdr *ccbh;
721 struct dpt_ccb *dccb;
722 struct eata_ccb *eccb;
726 /* Max CDB length is 12 bytes */
727 if (csio->cdb_len > 12) {
728 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Out of DCCBs: freeze the SIMQ and ask CAM to requeue. */
732 if ((dccb = dptgetccb(dpt)) == NULL) {
733 dpt->resource_shortage = 1;
734 xpt_freeze_simq(sim, /*count*/1);
735 ccb->ccb_h.status = CAM_REQUEUE_REQ;
739 eccb = &dccb->eata_ccb;
741 /* Link dccb and ccb so we can find one from the other */
743 ccb->ccb_h.ccb_dccb_ptr = dccb;
744 ccb->ccb_h.ccb_dpt_ptr = dpt;
747 * Explicitly set all flags so that the compiler can
748 * be smart about setting them.
750 eccb->SCSI_Reset = 0;
752 eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE)
757 ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)]
759 eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0;
760 eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0;
761 eccb->reqlen = csio->sense_len;
762 eccb->cp_id = ccb->ccb_h.target_id;
763 eccb->cp_channel = cam_sim_bus(sim);
764 eccb->cp_LUN = ccb->ccb_h.target_lun;
766 eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
768 eccb->cp_identify = 1;
770 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
771 && csio->tag_action != CAM_TAG_ACTION_NONE) {
772 eccb->cp_msg[0] = csio->tag_action;
773 eccb->cp_msg[1] = dccb->tag;
780 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
781 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
782 bcopy(csio->cdb_io.cdb_ptr,
783 eccb->cp_cdb, csio->cdb_len);
785 /* I guess I could map it in... */
786 ccb->ccb_h.status = CAM_REQ_INVALID;
787 dptfreeccb(dpt, dccb);
792 bcopy(csio->cdb_io.cdb_bytes,
793 eccb->cp_cdb, csio->cdb_len);
796 * If we have any data to send with this command,
797 * map it into bus space.
799 /* Only use S/G if there is a transfer */
800 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
803 error = bus_dmamap_load_ccb(dpt->buffer_dmat,
808 if (error == EINPROGRESS) {
810 * So as to maintain ordering,
811 * freeze the controller queue
812 * until our mapping is
815 xpt_freeze_simq(sim, 1);
/*
 * NOTE(review): CAM_RELEASE_SIMQ is OR'd into dccb->state, but
 * dccb->state elsewhere holds DCCB_* flags (dptfreeccb tests
 * DCCB_RELEASE_SIMQ).  Suspected wrong flag namespace -- verify.
 */
816 dccb->state |= CAM_RELEASE_SIMQ;
821 * Does it want them both on or both off?
822 * CAM_DIR_NONE is both on, so this code can
823 * be removed if this is also what the DPT
828 dptexecuteccb(dccb, NULL, 0, 0);
832 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
833 case XPT_ABORT: /* Abort the specified CCB */
835 ccb->ccb_h.status = CAM_REQ_INVALID;
838 case XPT_SET_TRAN_SETTINGS:
840 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
844 case XPT_GET_TRAN_SETTINGS:
845 /* Get default/user set transfer settings for the target */
847 struct ccb_trans_settings *cts = &ccb->cts;
848 struct ccb_trans_settings_scsi *scsi =
849 &cts->proto_specific.scsi;
850 struct ccb_trans_settings_spi *spi =
851 &cts->xport_specific.spi;
853 cts->protocol = PROTO_SCSI;
854 cts->protocol_version = SCSI_REV_2;
855 cts->transport = XPORT_SPI;
856 cts->transport_version = 2;
858 if (cts->type == CTS_TYPE_USER_SETTINGS) {
859 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
/*
 * NOTE(review): this ternary looks inverted -- max_id > 7 suggests a
 * wide (16-bit) bus yet selects MSG_EXT_WDTR_BUS_8_BIT.  Confirm
 * against the full source / MSG_EXT_WDTR definitions.
 */
860 spi->bus_width = (dpt->max_id > 7)
861 ? MSG_EXT_WDTR_BUS_8_BIT
862 : MSG_EXT_WDTR_BUS_16_BIT;
863 spi->sync_period = 25; /* 10MHz */
864 if (spi->sync_period != 0)
865 spi->sync_offset = 15;
866 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
868 spi->valid = CTS_SPI_VALID_SYNC_RATE
869 | CTS_SPI_VALID_SYNC_OFFSET
870 | CTS_SPI_VALID_BUS_WIDTH
871 | CTS_SPI_VALID_DISC;
872 scsi->valid = CTS_SCSI_VALID_TQ;
873 ccb->ccb_h.status = CAM_REQ_CMP;
875 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
880 case XPT_CALC_GEOMETRY:
883 * XXX Use Adaptec translation until I find out how to
884 * get this information from the card.
886 cam_calc_geometry(&ccb->ccg, /*extended*/1);
890 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
893 ccb->ccb_h.status = CAM_REQ_CMP;
897 case XPT_TERM_IO: /* Terminate the I/O process */
899 ccb->ccb_h.status = CAM_REQ_INVALID;
902 case XPT_PATH_INQ: /* Path routing inquiry */
904 struct ccb_pathinq *cpi = &ccb->cpi;
906 cpi->version_num = 1;
907 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
909 cpi->hba_inquiry |= PI_WIDE_16;
910 cpi->target_sprt = 0;
912 cpi->hba_eng_cnt = 0;
913 cpi->max_target = dpt->max_id;
914 cpi->max_lun = dpt->max_lun;
915 cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)];
916 cpi->bus_id = cam_sim_bus(sim);
917 cpi->base_transfer_speed = 3300;
918 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
919 strlcpy(cpi->hba_vid, "DPT", HBA_IDLEN);
920 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
921 cpi->unit_number = cam_sim_unit(sim);
922 cpi->transport = XPORT_SPI;
923 cpi->transport_version = 2;
924 cpi->protocol = PROTO_SCSI;
925 cpi->protocol_version = SCSI_REV_2;
926 cpi->ccb_h.status = CAM_REQ_CMP;
931 ccb->ccb_h.status = CAM_REQ_INVALID;
938 * This routine will try to send an EATA command to the DPT HBA.
939 * It will, by default, try 20,000 times, waiting 50us between tries.
940 * It returns 0 on success and 1 on failure.
/*
 * dpt_send_eata_command: hand an EATA command packet to the HBA.
 * Spins until the adapter is not busy (up to `retries` iterations),
 * writes the packet's bus address to HA_WDMAADDR and the command to
 * HA_WCOMMAND.  Returns 0 on success, non-zero if the adapter stayed
 * busy (see the header comment at original lines 938-940).
 */
943 dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
944 u_int32_t cmd_busaddr, u_int command, u_int retries,
945 u_int ifc, u_int code, u_int code2)
953 * I hate this polling nonsense. Wish there was a way to tell the DPT
954 * to go get commands at its own pace, or to interrupt when ready.
955 * In the mean time we will measure how many iterations it really
958 for (loop = 0; loop < retries; loop++) {
959 if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0)
965 if (loop < retries) {
966 #ifdef DPT_MEASURE_PERFORMANCE
967 if (loop > dpt->performance.max_eata_tries)
968 dpt->performance.max_eata_tries = loop;
970 if (loop < dpt->performance.min_eata_tries)
971 dpt->performance.min_eata_tries = loop;
974 #ifdef DPT_MEASURE_PERFORMANCE
975 ++dpt->performance.command_too_busy;
980 /* The controller is alive, advance the wedge timer */
982 dpt->last_contact = microtime_now;
985 if (cmd_block == NULL)
987 #if (BYTE_ORDER == BIG_ENDIAN)
/*
 * NOTE(review): this "byte swap" is broken -- every term lands in the
 * low byte because the left shifts are missing (a correct swap needs
 * e.g. ((x >> 8) & 0xFF00) and ((x & 0xFF) << 24)).  Latent bug on
 * big-endian hosts only; verify against the full source and byteswap32().
 */
989 cmd_busaddr = ((cmd_busaddr >> 24) & 0xFF)
990 | ((cmd_busaddr >> 16) & 0xFF)
991 | ((cmd_busaddr >> 8) & 0xFF)
992 | (cmd_busaddr & 0xFF);
995 /* And now the address */
996 dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr);
998 if (command == EATA_CMD_IMMEDIATE) {
999 if (cmd_block == NULL) {
1000 dpt_outb(dpt, HA_WCODE2, code2);
1001 dpt_outb(dpt, HA_WCODE, code);
1003 dpt_outb(dpt, HA_WIFC, ifc);
1005 dpt_outb(dpt, HA_WCOMMAND, command);
1011 /* ==================== Exported Function definitions =======================*/
/*
 * dpt_alloc: early softc initialization -- mutex, free/pending lists,
 * per-channel reset levels, and (optionally) the performance counters.
 */
1013 dpt_alloc(device_t dev)
1015 dpt_softc_t *dpt = device_get_softc(dev);
1018 mtx_init(&dpt->lock, "dpt", NULL, MTX_DEF);
1019 SLIST_INIT(&dpt->free_dccb_list);
1020 LIST_INIT(&dpt->pending_ccb_list);
1021 for (i = 0; i < MAX_CHANNELS; i++)
1022 dpt->resetlevel[i] = DPT_HA_OK;
1024 #ifdef DPT_MEASURE_PERFORMANCE
1025 dpt_reset_performance(dpt);
1026 #endif /* DPT_MEASURE_PERFORMANCE */
/*
 * dpt_free: tear down everything dpt_init() built, in reverse order.
 * The switch on init_level apparently falls through from the highest
 * reached level down (case labels sit on dropped lines -- verify),
 * releasing DCCB memory, DMA tags, and all S/G map chunks.
 */
1031 dpt_free(struct dpt_softc *dpt)
1033 switch (dpt->init_level) {
1036 bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap);
1038 bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs,
1041 bus_dma_tag_destroy(dpt->dccb_dmat);
1043 bus_dma_tag_destroy(dpt->buffer_dmat);
1046 struct sg_map_node *sg_map;
1048 while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) {
1049 SLIST_REMOVE_HEAD(&dpt->sg_maps, links);
1050 bus_dmamap_unload(dpt->sg_dmat,
1052 bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr,
1054 free(sg_map, M_DEVBUF);
1056 bus_dma_tag_destroy(dpt->sg_dmat);
1061 mtx_destroy(&dpt->lock);
/*
 * dpt_alloc_resources: claim the bus resources (I/O window and IRQ)
 * described by the softc's io_type/io_rid/irq_rid fields.
 */
1065 dpt_alloc_resources (device_t dev)
1070 dpt = device_get_softc(dev);
1072 dpt->io_res = bus_alloc_resource_any(dev, dpt->io_type, &dpt->io_rid,
1074 if (dpt->io_res == NULL) {
1075 device_printf(dev, "No I/O space?!\n");
1080 dpt->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dpt->irq_rid,
1082 if (dpt->irq_res == NULL) {
1083 device_printf(dev, "No IRQ!\n");
/*
 * dpt_release_resources: undo dpt_alloc_resources() -- tear down the
 * interrupt hook and release I/O, IRQ, and (ISA) DRQ resources.  The
 * NULL guards appear on dropped lines; verify before editing.
 */
1095 dpt_release_resources (device_t dev)
1097 struct dpt_softc * dpt;
1099 dpt = device_get_softc(dev);
1102 bus_teardown_intr(dev, dpt->irq_res, dpt->ih);
1104 bus_release_resource(dev, dpt->io_type, dpt->io_rid, dpt->io_res);
1106 bus_release_resource(dev, SYS_RES_IRQ, dpt->irq_rid, dpt->irq_res);
1108 bus_release_resource(dev, SYS_RES_DRQ, dpt->drq_rid, dpt->drq_res);
1113 static u_int8_t string_sizes[] =
1115 sizeof(((dpt_inq_t*)NULL)->vendor),
1116 sizeof(((dpt_inq_t*)NULL)->modelNum),
1117 sizeof(((dpt_inq_t*)NULL)->firmware),
1118 sizeof(((dpt_inq_t*)NULL)->protocol),
/*
 * dpt_init: bring the controller up.  Bootstraps an S/G page to use as a
 * temporary status packet + CCB, fetches the board configuration and
 * inquiry pages, detects the cache mode, derives limits (max IDs/LUNs,
 * queue depth, S/G size), then creates the real DMA tags, allocates and
 * maps the DCCB array, seeds the free-CCB pool, and prints a summary.
 * Returns non-zero (on dropped lines) if any step fails.
 */
1122 dpt_init(struct dpt_softc *dpt)
1125 struct sg_map_node *sg_map;
1132 dpt->init_level = 0;
1133 SLIST_INIT(&dpt->sg_maps);
1134 mtx_lock(&dpt->lock);
1136 #ifdef DPT_RESET_BOARD
1137 device_printf(dpt->dev, "resetting HBA\n");
1138 dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
1140 /* XXX Shouldn't we poll a status register or something??? */
1142 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1143 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1146 /* lowaddr */ BUS_SPACE_MAXADDR,
1147 /* highaddr */ BUS_SPACE_MAXADDR,
1149 /* filterarg */ NULL,
1150 /* maxsize */ PAGE_SIZE,
1152 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1154 /* lockfunc */ NULL,
1156 &dpt->sg_dmat) != 0) {
1163 * We allocate our DPT ccbs as a contiguous array of bus dma'able
1164 * memory. To get the allocation size, we need to know how many
1165 * ccbs the card supports. This requires a ccb. We solve this
1166 * chicken and egg problem by allocating some re-usable S/G space
1167 * up front, and treating it as our status packet, CCB, and target
1168 * memory space for these commands.
1170 sg_map = dptallocsgmap(dpt);
/* Layout of the bootstrap page: [dpt_sp_t][dpt_ccb][data area]. */
1174 dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
1175 dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
1176 bzero(dccb, sizeof(*dccb));
1177 dpt->sp_physaddr = sg_map->sg_physaddr;
1178 dccb->eata_ccb.cp_dataDMA =
1179 htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
1180 dccb->eata_ccb.cp_busaddr = ~0;
1181 dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
1182 dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
1183 + offsetof(struct dpt_ccb, sense_data));
1185 /* Okay. Fetch our config */
1186 bzero(&dccb[1], sizeof(conf)); /* data area */
1187 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1188 sizeof(conf), 0xc1, 7, 1);
1191 device_printf(dpt->dev, "Failed to get board configuration\n");
1194 bcopy(&dccb[1], &conf, sizeof(conf));
1196 bzero(&dccb[1], sizeof(dpt->board_data));
1197 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1198 sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
1200 device_printf(dpt->dev, "Failed to get inquiry information\n");
1203 bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));
1205 dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1206 (u_int8_t *)&dccb[1]);
/* Status-packet length encodes the EATA revision level. */
1208 switch (ntohl(conf.splen)) {
1210 dpt->EATA_revision = 'a';
1213 dpt->EATA_revision = 'b';
1216 dpt->EATA_revision = 'c';
1219 dpt->EATA_revision = 'z';
1222 dpt->EATA_revision = '?';
1225 dpt->max_id = conf.MAX_ID;
1226 dpt->max_lun = conf.MAX_LUN;
1227 dpt->irq = conf.IRQ;
1228 dpt->dma_channel = (8 - conf.DMA_channel) & 7;
1229 dpt->channels = conf.MAX_CHAN + 1;
1230 dpt->state |= DPT_HA_OK;
1232 dpt->primary = FALSE;
1234 dpt->primary = TRUE;
1236 dpt->more_support = conf.MORE_support;
/* Immediate commands need firmware 07G0 or newer. */
1238 if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
1239 dpt->immediate_support = 1;
1241 dpt->immediate_support = 0;
1243 dpt->cplen = ntohl(conf.cplen);
1244 dpt->cppadlen = ntohs(conf.cppadlen);
1245 dpt->max_dccbs = ntohs(conf.queuesiz);
/* The tag field is one byte, so the pool is capped at 256 entries. */
1247 if (dpt->max_dccbs > 256) {
1248 device_printf(dpt->dev, "Max CCBs reduced from %d to "
1249 "256 due to tag algorithm\n", dpt->max_dccbs);
1250 dpt->max_dccbs = 256;
1253 dpt->hostid[0] = conf.scsi_id0;
1254 dpt->hostid[1] = conf.scsi_id1;
1255 dpt->hostid[2] = conf.scsi_id2;
1260 dpt->sgsize = ntohs(conf.SGsiz);
1262 /* We can only get 64k buffers, so don't bother to waste space. */
1263 if (dpt->sgsize < 17 || dpt->sgsize > 32)
1266 if (dpt->sgsize > dpt_max_segs)
1267 dpt->sgsize = dpt_max_segs;
1269 /* DMA tag for mapping buffers into device visible space. */
1270 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1273 /* lowaddr */ BUS_SPACE_MAXADDR,
1274 /* highaddr */ BUS_SPACE_MAXADDR,
1276 /* filterarg */ NULL,
1277 /* maxsize */ DFLTPHYS,
1278 /* nsegments */ dpt->sgsize,
1279 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1280 /* flags */ BUS_DMA_ALLOCNOW,
1281 /* lockfunc */ busdma_lock_mutex,
1282 /* lockarg */ &dpt->lock,
1283 &dpt->buffer_dmat) != 0) {
1284 device_printf(dpt->dev,
1285 "bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
1291 /* DMA tag for our ccb structures and interrupt status packet */
1292 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1295 /* lowaddr */ BUS_SPACE_MAXADDR,
1296 /* highaddr */ BUS_SPACE_MAXADDR,
1298 /* filterarg */ NULL,
1299 /* maxsize */ (dpt->max_dccbs *
1300 sizeof(struct dpt_ccb)) +
1303 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1305 /* lockfunc */ NULL,
1307 &dpt->dccb_dmat) != 0) {
1308 device_printf(dpt->dev,
1309 "bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
1315 /* Allocation for our ccbs and interrupt status packet */
1316 if (bus_dmamem_alloc(dpt->dccb_dmat, (void **)&dpt->dpt_dccbs,
1317 BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
1318 device_printf(dpt->dev,
1319 "bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
1325 /* And permanently map them */
1326 bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
1328 (dpt->max_dccbs * sizeof(struct dpt_ccb))
1330 dptmapmem, &dpt->dpt_ccb_busbase, /*flags*/0);
1332 /* Clear them out. */
1333 bzero(dpt->dpt_dccbs,
1334 (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));
1336 dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;
/* The real status packet lives just past the DCCB array. */
1338 dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
1339 dpt->sp_physaddr = dpt->dpt_ccb_busbase
1340 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
1343 /* Allocate our first batch of ccbs */
1344 if (dptallocccbs(dpt) == 0) {
1345 device_printf(dpt->dev, "dptallocccbs(dpt) == 0\n");
1346 mtx_unlock(&dpt->lock);
1350 /* Prepare for Target Mode */
1351 dpt->target_mode_enabled = 1;
1353 /* Nuke excess spaces from inquiry information */
1354 strp = dpt->board_data.vendor;
1355 for (i = 0; i < sizeof(string_sizes); i++) {
1356 index = string_sizes[i] - 1;
1357 while (index && (strp[index] == ' '))
1358 strp[index--] = '\0';
1359 strp += string_sizes[i];
1362 device_printf(dpt->dev, "%.8s %.16s FW Rev. %.4s, ",
1363 dpt->board_data.vendor,
1364 dpt->board_data.modelNum, dpt->board_data.firmware);
1366 printf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");
1368 if (dpt->cache_type != DPT_NO_CACHE
1369 && dpt->cache_size != 0) {
1370 printf("%s Cache, ",
1371 dpt->cache_type == DPT_CACHE_WRITETHROUGH
1372 ? "Write-Through" : "Write-Back");
1375 printf("%d CCBs\n", dpt->max_dccbs);
1376 mtx_unlock(&dpt->lock);
1380 mtx_unlock(&dpt->lock);
/*
 * dpt_attach: register the controller with CAM.  Allocates one shared
 * device queue, then for each channel creates a SIM, registers the bus,
 * and builds a wildcard path; partial failures free that channel's SIM
 * and continue.  Finally hooks dptshutdown() into shutdown_final.
 */
1385 dpt_attach(dpt_softc_t *dpt)
1387 struct cam_devq *devq;
1391 * Create the device queue for our SIM.
1393 devq = cam_simq_alloc(dpt->max_dccbs);
1397 mtx_lock(&dpt->lock);
1398 for (i = 0; i < dpt->channels; i++) {
1400 * Construct our SIM entry
1402 dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt",
1403 dpt, device_get_unit(dpt->dev), &dpt->lock,
1405 /*tagged*/dpt->max_dccbs, devq);
1406 if (dpt->sims[i] == NULL) {
1408 cam_simq_free(devq);
1410 printf( "%s(): Unable to attach bus %d "
1411 "due to resource shortage\n",
1416 if (xpt_bus_register(dpt->sims[i], dpt->dev, i) != CAM_SUCCESS){
1417 cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1418 dpt->sims[i] = NULL;
1422 if (xpt_create_path(&dpt->paths[i], /*periph*/NULL,
1423 cam_sim_path(dpt->sims[i]),
1424 CAM_TARGET_WILDCARD,
1425 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1426 xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
1427 cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1428 dpt->sims[i] = NULL;
1433 mtx_unlock(&dpt->lock);
1435 EVENTHANDLER_REGISTER(shutdown_final, dptshutdown,
1436 dpt, SHUTDOWN_PRI_DEFAULT);
/*
 * dpt_detach:
 *	Tear down every CAM bus this controller registered: announce loss of
 *	all devices, free the wildcard paths, deregister the buses and free
 *	the SIMs, then give the HBA its power-loss warning and release the
 *	bus resources.
 *	NOTE(review): free_devq is TRUE for every channel here although the
 *	devq is shared across SIMs — verify this cannot double-free the devq
 *	on multi-channel boards.
 */
1441 dpt_detach (device_t dev)
1443 struct dpt_softc * dpt;
1446 dpt = device_get_softc(dev);
1448 mtx_lock(&dpt->lock);
1449 for (i = 0; i < dpt->channels; i++) {
/* Tell CAM the devices behind this path are gone before dismantling it. */
1451 xpt_async(AC_LOST_DEVICE, dpt->paths[i], NULL);
1453 xpt_free_path(dpt->paths[i]);
1454 xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
1455 cam_sim_free(dpt->sims[i], /*free_devq*/TRUE);
1457 mtx_unlock(&dpt->lock);
/* Same routine the shutdown_final event runs: flush the HBA cache now. */
1459 dptshutdown((void *)dpt, SHUTDOWN_PRI_DEFAULT);
1461 dpt_release_resources(dev);
/*
 * dpt_intr: top-level interrupt entry — acquire the softc lock and defer
 * all real work to dpt_intr_locked().  (Signature line elided in this
 * listing.)
 */
1469 * This is the interrupt handler for the DPT driver.
1477 mtx_lock(&dpt->lock);
1478 dpt_intr_locked(dpt);
1479 mtx_unlock(&dpt->lock);
/*
 * dpt_intr_locked:
 *	Core interrupt service loop, called with the softc lock held.
 *	For every pending interrupt (HA_AIRQ set in the aux status register):
 *	validate the status packet's CCB bus address, snapshot the status
 *	fields, acknowledge the interrupt by reading HA_RSTATUS, and complete
 *	the corresponding CCB — inline for the common no-error case, via
 *	dptprocesserror() otherwise.
 *	NOTE(review): elided listing; several continue/break lines and the
 *	closing braces are not visible here.
 */
1483 dpt_intr_locked(dpt_softc_t *dpt)
1491 u_int32_t residue_len; /* Number of bytes not transferred */
1493 /* First order of business is to check if this interrupt is for us */
1494 while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {
1497 * What we want to do now, is to capture the status, all of it,
1498 * move it where it belongs, wake up whoever sleeps waiting to
1499 * process this result, and get out of here.
/* Reject status packets whose CCB address is outside our CCB array. */
1501 if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
1502 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
1503 device_printf(dpt->dev,
1504 "Encountered bogus status packet\n");
/* Reading HA_RSTATUS acknowledges (clears) the interrupt. */
1505 status = dpt_inb(dpt, HA_RSTATUS);
/* Map the bus address in the status packet back to our dccb. */
1509 dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);
/* Poison the address so a stale packet cannot match a live CCB again. */
1511 dpt->sp->ccb_busaddr = ~0;
1513 /* Ignore status packets with EOC not set */
1514 if (dpt->sp->EOC == 0) {
1515 device_printf(dpt->dev,
1516 "ERROR: Request %d received with "
1517 "clear EOC.\n Marking as LOST.\n",
1518 dccb->transaction_id);
1520 /* This CLEARS the interrupt! */
1521 status = dpt_inb(dpt, HA_RSTATUS);
1527 * Double buffer the status information so the hardware can
1528 * work on updating the status packet while we decipher the
1529 * one we were just interrupted for.
1530 * According to Mark Salyzyn, we only need few pieces of it.
1532 hba_stat = dpt->sp->hba_stat;
1533 scsi_stat = dpt->sp->scsi_stat;
1534 residue_len = dpt->sp->residue_len;
1536 /* Clear interrupts, check for error */
1537 if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
1539 * Error Condition. Check for magic cookie. Exit
1540 * this test on earliest sign of non-reset condition
1543 /* Check that this is not a board reset interrupt */
1544 if (dpt_just_reset(dpt)) {
1545 device_printf(dpt->dev, "HBA rebooted.\n"
1546 " All transactions should be "
1549 device_printf(dpt->dev,
1550 ">>---->> This is incomplete, "
1551 "fix me.... <<----<<");
1552 panic("DPT Rebooted");
/* Command completed: cancel its timeout and tear down its DMA mapping. */
1558 callout_stop(&dccb->timer);
1559 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1560 bus_dmasync_op_t op;
1562 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1563 op = BUS_DMASYNC_POSTREAD;
1565 op = BUS_DMASYNC_POSTWRITE;
1566 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
1567 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
1570 /* Common Case inline... */
1571 if (hba_stat == HA_NO_ERROR) {
1572 ccb->csio.scsi_status = scsi_stat;
1573 ccb->ccb_h.status = 0;
1574 switch (scsi_stat) {
1575 case SCSI_STATUS_OK:
1576 ccb->ccb_h.status |= CAM_REQ_CMP;
1578 case SCSI_STATUS_CHECK_COND:
1579 case SCSI_STATUS_CMD_TERMINATED:
/* Autosense data was collected by the HBA; hand it to CAM. */
1580 bcopy(&dccb->sense_data, &ccb->csio.sense_data,
1581 ccb->csio.sense_len);
1582 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1585 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1586 /* XXX Freeze DevQ */
1589 ccb->csio.resid = residue_len;
1590 dptfreeccb(dpt, dccb);
/* HBA-level error: let the error decoder set the CAM status. */
1593 dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
/*
 * dptprocesserror:
 *	Translate an EATA HBA status code (hba_stat) into the corresponding
 *	CAM ccb_h.status value, record the residual byte count, and release
 *	the dccb.  NOTE(review): elided listing; most "case HA_*:" labels,
 *	break statements and the switch header are not visible here.
 */
1600 dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
1601 u_int hba_stat, u_int scsi_stat, u_int32_t resid)
1603 ccb->csio.resid = resid;
1606 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1609 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1611 case HA_SCSIBUS_RESET:
1612 case HA_HBA_POWER_UP: /* Similar effect to a bus reset??? */
1613 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1616 case HA_CP_RESET: /* XXX ??? */
1617 case HA_CP_ABORT_NA: /* XXX ??? */
1618 case HA_CP_RESET_NA: /* XXX ??? */
/* Only flag an abort if nobody set a more specific status already. */
1619 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
1620 ccb->ccb_h.status = CAM_REQ_ABORTED;
1625 case HA_PCI_STABORT:
1629 ccb->ccb_h.status = CAM_UNCOR_PARITY;
1631 case HA_UNX_MSGRJCT:
1632 ccb->ccb_h.status = CAM_MSG_REJECT_REC;
1634 case HA_UNX_BUSPHASE:
1635 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1637 case HA_UNX_BUS_FREE:
1638 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1641 case HA_RESET_STUCK:
1643 * Dead??? Can the controller get unstuck
1644 * from these conditions
1646 ccb->ccb_h.status = CAM_NO_HBA;
1648 case HA_RSENSE_FAIL:
1649 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
/* Unknown hba_stat value: report it and fail the request. */
1652 device_printf(dpt->dev, "Undocumented Error %x\n", hba_stat);
1653 printf("Please mail this message to shimon@simon-shapiro.org\n");
1654 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1657 dptfreeccb(dpt, dccb);
/*
 * dpttimeout:
 *	Callout handler for a CCB that did not complete in time.  First run
 *	the interrupt service routine in case the completion interrupt was
 *	merely lost; if the CCB is genuinely still active, send the HBA an
 *	EATA_SPECIFIC_ABORT immediate command for just this CCB and mark it
 *	CAM_CMD_TIMEOUT.  Runs with the softc lock held (asserted below).
 */
1662 dpttimeout(void *arg)
1664 struct dpt_ccb *dccb;
1666 struct dpt_softc *dpt;
1668 dccb = (struct dpt_ccb *)arg;
1670 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
1671 mtx_assert(&dpt->lock, MA_OWNED);
1672 xpt_print_path(ccb->ccb_h.path);
1673 printf("CCB %p - timed out\n", (void *)dccb);
1676 * Try to clear any pending jobs. FreeBSD will lose interrupts,
1677 * leaving the controller suspended, and commands timed-out.
1678 * By calling the interrupt handler, any command thus stuck will be
1681 dpt_intr_locked(dpt);
/* The ISR may have completed this CCB; if so there is nothing to abort. */
1683 if ((dccb->state & DCCB_ACTIVE) == 0) {
1684 xpt_print_path(ccb->ccb_h.path);
1685 printf("CCB %p - timed out CCB already completed\n",
1690 /* Abort this particular command. Leave all others running */
1691 dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
1692 /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
1693 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1697 * Shutdown the controller and ensure that the cache is completely flushed.
1698 * Called from the shutdown_final event after all disk access has completed.
/*
 * Sends the EATA early-power-loss warning, which makes the HBA flush its
 * write cache, then busy-waits five seconds to let the flush finish.
 * The DELAY() spin is acceptable here because the system is already in
 * final shutdown — no other work is pending.
 */
1701 dptshutdown(void *arg, int howto)
1705 dpt = (dpt_softc_t *)arg;
1707 device_printf(dpt->dev,
1708 "Shutting down (mode %x) HBA. Please wait...\n", howto);
1711 * What we do for a shutdown, is give the DPT early power loss warning
1713 mtx_lock(&dpt->lock);
1714 dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
1715 mtx_unlock(&dpt->lock);
/* 5-second spin: give the HBA time to write its cache out. */
1716 DELAY(1000 * 1000 * 5);
1717 device_printf(dpt->dev, "Controller was warned of shutdown and is now "
1724 #ifdef DPT_RESET_HBA
1727 ** Function name : dpt_reset_hba
1729 ** Description : Reset the HBA and properly discard all pending work
1734 dpt_reset_hba(dpt_softc_t *dpt)
1737 dpt_ccb_t dccb, *dccbp;
1739 struct scsi_xfer *xs;
1741 mtx_assert(&dpt->lock, MA_OWNED);
1743 /* Prepare a control block. The SCSI command part is immaterial */
1746 dccb.state = DPT_CCB_STATE_NEW;
1747 dccb.std_callback = NULL;
1748 dccb.wrbuff_callback = NULL;
1750 ccb = &dccb.eata_ccb;
1751 ccb->CP_OpCode = EATA_CMD_RESET;
1752 ccb->SCSI_Reset = 0;
1754 ccb->Auto_Req_Sen = 1;
1755 ccb->cp_id = 0; /* Should be ignored */
1759 ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
1760 ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
1761 ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
1762 ccb->cp_viraddr = (u_int32_t) & ccb;
1764 ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1765 ccb->cp_scsi_cmd = 0; /* Should be ignored */
1767 /* Lock up the submitted queue. We are very persistent here */
1768 while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
1772 dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;
1774 /* Send the RESET message */
1775 if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1776 EATA_CMD_RESET, 0, 0, 0, 0)) != 0) {
1777 device_printf(dpt->dev, "Failed to send the RESET message.\n"
1778 " Trying cold boot (ouch!)\n");
1781 if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1782 EATA_COLD_BOOT, 0, 0,
1784 panic("%s: Faild to cold boot the HBA\n",
1785 device_get_nameunit(dpt->dev));
1787 #ifdef DPT_MEASURE_PERFORMANCE
1788 dpt->performance.cold_boots++;
1789 #endif /* DPT_MEASURE_PERFORMANCE */
1792 #ifdef DPT_MEASURE_PERFORMANCE
1793 dpt->performance.warm_starts++;
1794 #endif /* DPT_MEASURE_PERFORMANCE */
1796 device_printf(dpt->dev,
1797 "Aborting pending requests. O/S should re-submit\n");
1799 while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
1800 struct scsi_xfer *xs = dccbp->xs;
1802 /* Not all transactions have xs structs */
1804 /* Tell the kernel proper this did not complete well */
1805 xs->error |= XS_SELTIMEOUT;
1806 xs->flags |= SCSI_ITSDONE;
1810 dpt_Qremove_submitted(dpt, dccbp);
1812 /* Remember, Callbacks are NOT in the standard queue */
1813 if (dccbp->std_callback != NULL) {
1814 (dccbp->std_callback)(dpt, dccbp->eata_ccb.cp_channel,
1817 dpt_Qpush_free(dpt, dccbp);
1821 device_printf(dpt->dev, "reset done aborting all pending commands\n");
1822 dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
1825 #endif /* DPT_RESET_HBA */
1828 * Build a Command Block for target mode READ/WRITE BUFFER,
1829 * with the ``sync'' bit ON.
1831 * Although the length and offset are 24 bit fields in the command, they cannot
1832 * exceed 8192 bytes, so we take them as short integers and check their range.
1833 * If they are senseless, we round them to zero offset, maximum length and
/*
 * dpt_target_ccb:
 *	Build the EATA command block for a target-mode READ BUFFER or
 *	WRITE BUFFER (``command'') to bus/target/lun, sending ``length''
 *	bytes starting at ``offset'' within the shared rw_buffer, then set
 *	up scatter/gather for that buffer.
 *	Per the SPC READ/WRITE BUFFER CDB layout, bytes 3-5 carry the
 *	24-bit buffer offset and bytes 6-8 the 24-bit allocation length.
 *	BUG FIX: the offset bytes were previously filled from ``length'',
 *	silently ignoring the (already range-checked) ``offset'' argument.
 */
1838 dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun,
1839 dpt_ccb_t * ccb, int mode, u_int8_t command,
1840 u_int16_t length, u_int16_t offset)
1844 mtx_assert(&dpt->lock, MA_OWNED);
/* Clamp nonsensical requests to a full-buffer, zero-offset transfer. */
1845 if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) {
1846 device_printf(dpt->dev,
1847 "Length of %d, and offset of %d are wrong\n",
1849 length = DPT_MAX_TARGET_MODE_BUFFER_SIZE;
1854 ccb->state = DPT_CCB_STATE_NEW;
1855 ccb->std_callback = (ccb_callback) dpt_target_done;
1856 ccb->wrbuff_callback = NULL;
1858 cp = &ccb->eata_ccb;
1859 cp->CP_OpCode = EATA_CMD_DMA_SEND_CP;
1862 cp->Auto_Req_Sen = 1;
1867 cp->reqlen = htonl(sizeof(struct scsi_sense_data));
1868 cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA));
1869 cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA));
/* NOTE(review): this stores the address of the local pointer variable,
 * not of the CCB itself — looks suspicious, confirm intent. */
1870 cp->cp_viraddr = (u_int32_t) & ccb;
1872 cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1874 cp->cp_scsi_cmd = command;
1875 cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK);
1876 cp->cp_lun = lun; /* Order is important here! */
1877 cp->cp_cdb[2] = 0x00; /* Buffer Id, only 1 :-( */
/* offset is a u_int16_t, so the MSB below is always 0; kept for the
 * 24-bit CDB field layout. */
1878 cp->cp_cdb[3] = (offset >> 16) & 0xFF; /* Buffer offset MSB */
1879 cp->cp_cdb[4] = (offset >> 8) & 0xFF;
1880 cp->cp_cdb[5] = offset & 0xFF;
1881 cp->cp_cdb[6] = (length >> 16) & 0xFF; /* Length MSB */
1882 cp->cp_cdb[7] = (length >> 8) & 0xFF;
1883 cp->cp_cdb[8] = length & 0xFF; /* Length LSB */
1884 cp->cp_cdb[9] = 0; /* No sync, no match bits */
1887 * This could be optimized to live in dpt_register_buffer.
1888 * We keep it here, just in case the kernel decides to reallocate pages
1890 if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE,
1891 dpt->rw_buffer[bus][target][lun])) {
1892 device_printf(dpt->dev, "Failed to setup Scatter/Gather for "
1893 "Target-Mode buffer\n");
1897 /* Setup a target mode READ command */
/*
 * dpt_set_target:
 *	Queue a target-mode READ BUFFER on bus/target/lun using the supplied
 *	CCB.  ``redo'' distinguishes the re-arm path from initial setup
 *	(its handling is elided from this listing).  Requires target mode to
 *	have been enabled at init and the softc lock to be held.
 */
1900 dpt_set_target(int redo, dpt_softc_t * dpt,
1901 u_int8_t bus, u_int8_t target, u_int8_t lun, int mode,
1902 u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb)
1905 mtx_assert(&dpt->lock, MA_OWNED);
1906 if (dpt->target_mode_enabled) {
1908 dpt_target_ccb(dpt, bus, target, lun, ccb, mode,
1909 SCSI_TM_READ_BUFFER, length, offset);
/* Stamp the CCB with a unique id for tracking/diagnostics. */
1911 ccb->transaction_id = ++dpt->commands_processed;
1913 #ifdef DPT_MEASURE_PERFORMANCE
1914 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
1915 ccb->command_started = microtime_now;
1917 dpt_Qadd_waiting(dpt, ccb);
1918 dpt_sched_queue(dpt);
1920 device_printf(dpt->dev,
1921 "Target Mode Request, but Target Mode is OFF\n");
1926 * Schedule a buffer to be sent to another target.
1927 * The work will be scheduled and the callback provided will be called when
1928 * the work is actually done.
1930 * Please NOTE: ``Anyone'' can send a buffer, but only registered clients
1931 * get notified of receipt of buffers.
/*
 * dpt_send_buffer:
 *	External entry point (looked up by unit number): validate the
 *	destination, obtain a free CCB, stage the caller's data and queue a
 *	target-mode WRITE BUFFER.  Returns a driver status code
 *	(INVALID_UNIT / INVALID_SENDER / NOT_REGISTERED / NO_RESOURCES /
 *	DRIVER_DOWN, success path elided from this listing).
 */
1935 dpt_send_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
1936 u_int8_t mode, u_int16_t length, u_int16_t offset, void *data,
1937 buff_wr_done callback)
1940 dpt_ccb_t *ccb = NULL;
1942 /* This is an external call. Be a bit paranoid */
1943 dpt = devclass_get_device(dpt_devclass, unit);
1945 return (INVALID_UNIT);
1947 mtx_lock(&dpt->lock);
1948 if (dpt->target_mode_enabled) {
1949 if ((channel >= dpt->channels) || (target > dpt->max_id) ||
1950 (lun > dpt->max_lun)) {
1951 mtx_unlock(&dpt->lock);
1952 return (INVALID_SENDER);
/* The destination must have registered a receive buffer + callback. */
1954 if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
1955 (dpt->buffer_receiver[channel][target][lun] == NULL)) {
1956 mtx_unlock(&dpt->lock);
1957 return (NOT_REGISTERED);
1960 /* Process the free list */
1961 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
1962 device_printf(dpt->dev,
1963 "ERROR: Cannot allocate any more free CCB's.\n"
1964 " Please try later\n");
1965 mtx_unlock(&dpt->lock);
1966 return (NO_RESOURCES);
1968 /* Now grab the newest CCB */
1969 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
1970 mtx_unlock(&dpt->lock);
1971 panic("%s: Got a NULL CCB from pop_free()\n",
1972 device_get_nameunit(dpt->dev));
/* NOTE(review): for a SEND (WRITE BUFFER) one would expect the caller's
 * ``data'' to be copied INTO rw_buffer+offset; this copies the other
 * direction — confirm the intended bcopy src/dst order. */
1975 bcopy(dpt->rw_buffer[channel][target][lun] + offset, data, length);
1976 dpt_target_ccb(dpt, channel, target, lun, ccb, mode,
1977 SCSI_TM_WRITE_BUFFER,
1979 ccb->std_callback = (ccb_callback) callback; /* Potential trouble */
1981 ccb->transaction_id = ++dpt->commands_processed;
1983 #ifdef DPT_MEASURE_PERFORMANCE
1984 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
1985 ccb->command_started = microtime_now;
1987 dpt_Qadd_waiting(dpt, ccb);
1988 dpt_sched_queue(dpt);
1990 mtx_unlock(&dpt->lock);
1993 mtx_unlock(&dpt->lock);
1994 return (DRIVER_DOWN);
/*
 * dpt_target_done:
 *	Completion handler for target-mode READ/WRITE BUFFER CCBs.  Decodes
 *	channel/target/lun/offset/length out of the completed CDB (via the
 *	macros below), dispatches to the registered receive callback (READ)
 *	or the sender's write callback (WRITE), re-arms the READ, and
 *	finally returns the special per-target CCB to the free list.
 */
1998 dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2002 cp = &ccb->eata_ccb;
2005 * Remove the CCB from the waiting queue.
2006 * We do NOT put it back on the free, etc., queues as it is a special
2007 * ccb, owned by the dpt_softc of this unit.
2009 dpt_Qremove_completed(dpt, ccb);
/* Shorthand accessors for the fields encoded in the completed CDB. */
2011 #define br_channel (ccb->eata_ccb.cp_channel)
2012 #define br_target (ccb->eata_ccb.cp_id)
2013 #define br_lun (ccb->eata_ccb.cp_LUN)
2014 #define br_index [br_channel][br_target][br_lun]
2015 #define read_buffer_callback (dpt->buffer_receiver br_index )
2016 #define read_buffer (dpt->rw_buffer[br_channel][br_target][br_lun])
2017 #define cb(offset) (ccb->eata_ccb.cp_cdb[offset])
2018 #define br_offset ((cb(3) << 16) | (cb(4) << 8) | cb(5))
2019 #define br_length ((cb(6) << 16) | (cb(7) << 8) | cb(8))
2021 /* Different reasons for being here, you know... */
2022 switch (ccb->eata_ccb.cp_scsi_cmd) {
2023 case SCSI_TM_READ_BUFFER:
2024 if (read_buffer_callback != NULL) {
2025 /* This is a buffer generated by a kernel process */
2026 read_buffer_callback(device_get_unit(dpt->dev),
2027 br_channel, br_target, br_lun,
2029 br_offset, br_length);
2032 * This is a buffer waited for by a user (sleeping)
2038 /* We ALWAYS re-issue the same command; args are don't-care */
2039 dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
2042 case SCSI_TM_WRITE_BUFFER:
2043 (ccb->wrbuff_callback) (device_get_unit(dpt->dev), br_channel,
2044 br_target, br_offset, br_length,
2045 br_lun, ccb->status_packet.hba_stat);
2048 device_printf(dpt->dev,
2049 "%s is an unsupported command for target mode\n",
2050 scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
2052 dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
2053 dpt_Qpush_free(dpt, ccb);
2058 * Use this function to register a client for a buffer read target operation.
2059 * The function you register will be called every time a buffer is received
2060 * by the target mode code.
/*
 * dpt_register_buffer:
 *	External entry point (looked up by unit): REGISTER_BUFFER installs
 *	``callback'' as the receive handler for channel/target/lun,
 *	allocates the shared rw_buffer and arms the first target-mode READ;
 *	the other op deregisters, freeing the buffer.  Returns a driver
 *	status code describing the outcome.
 *	NOTE(review): the bounds check here uses max_id-1 / max_lun-1 while
 *	dpt_send_buffer() accepts up to max_id / max_lun — one of the two
 *	is off by one; confirm which is intended.
 */
2063 dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
2064 u_int8_t mode, u_int16_t length, u_int16_t offset,
2065 dpt_rec_buff callback, dpt_rb_op_t op)
2068 dpt_ccb_t *ccb = NULL;
2071 dpt = devclass_get_device(dpt_devclass, unit);
2073 return (INVALID_UNIT);
2074 mtx_lock(&dpt->lock);
2076 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE) {
2077 mtx_unlock(&dpt->lock);
2078 return (DRIVER_DOWN);
2081 if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) ||
2082 (lun > (dpt->max_lun - 1))) {
2083 mtx_unlock(&dpt->lock);
2084 return (INVALID_SENDER);
2087 if (dpt->buffer_receiver[channel][target][lun] == NULL) {
2088 if (op == REGISTER_BUFFER) {
2089 /* Assign the requested callback */
2090 dpt->buffer_receiver[channel][target][lun] = callback;
2093 /* Process the free list */
2094 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2095 device_printf(dpt->dev,
2096 "ERROR: Cannot allocate any more free CCB's.\n"
2097 " Please try later\n");
2098 mtx_unlock(&dpt->lock);
2099 return (NO_RESOURCES);
2101 /* Now grab the newest CCB */
2102 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2103 mtx_unlock(&dpt->lock);
2104 panic("%s: Got a NULL CCB from pop_free()\n",
2105 device_get_nameunit(dpt->dev));
2108 /* Clean up the leftover of the previous tenant */
2109 ccb->status = DPT_CCB_STATE_NEW;
2110 dpt->target_ccb[channel][target][lun] = ccb;
2112 dpt->rw_buffer[channel][target][lun] =
2113 malloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_NOWAIT);
2114 if (dpt->rw_buffer[channel][target][lun] == NULL) {
2115 device_printf(dpt->dev, "Failed to allocate "
2116 "Target-Mode buffer\n");
2117 dpt_Qpush_free(dpt, ccb);
2118 mtx_unlock(&dpt->lock);
2119 return (NO_RESOURCES);
/* Arm the first target-mode READ on this nexus. */
2121 dpt_set_target(0, dpt, channel, target, lun, mode,
2122 length, offset, ccb);
2123 mtx_unlock(&dpt->lock);
2124 return (SUCCESSFULLY_REGISTERED);
2126 mtx_unlock(&dpt->lock);
2127 return (NOT_REGISTERED);
2130 if (op == REGISTER_BUFFER) {
2131 if (dpt->buffer_receiver[channel][target][lun] == callback) {
2132 mtx_unlock(&dpt->lock);
2133 return (ALREADY_REGISTERED);
2135 mtx_unlock(&dpt->lock);
2136 return (REGISTERED_TO_ANOTHER);
/* Deregistration path: only the registered callback may deregister. */
2139 if (dpt->buffer_receiver[channel][target][lun] == callback) {
2140 dpt->buffer_receiver[channel][target][lun] = NULL;
/* NOTE(review): on this path ``ccb'' is still the NULL it was
 * initialized to — the intended CCB is presumably
 * dpt->target_ccb[channel][target][lun]; confirm. */
2141 dpt_Qpush_free(dpt, ccb);
2142 free(dpt->rw_buffer[channel][target][lun], M_DEVBUF);
2143 mtx_unlock(&dpt->lock);
2144 return (SUCCESSFULLY_REGISTERED);
2146 mtx_unlock(&dpt->lock);
2147 return (INVALID_CALLBACK);
2152 mtx_unlock(&dpt->lock);
2155 /* Return the state of the blinking DPT LED's */
/*
 * dpt_blinking_led:
 *	Sample the board's LED register until it reads stably, and return
 *	the blink code when the stable value equals DPT_BLINK_INDICATOR.
 *	NOTE(review): with state and previous both initialized to 0 the
 *	condition ``state != previous'' is false on entry, so as written the
 *	loop body never executes — confirm against the elided lines whether
 *	a do/while or an initial read is missing from this listing.
 */
2157 dpt_blinking_led(dpt_softc_t * dpt)
2164 mtx_assert(&dpt->lock, MA_OWNED);
2167 for (ndx = 0, state = 0, previous = 0;
2168 (ndx < 10) && (state != previous);
2171 state = dpt_inl(dpt, 1);
2174 if ((state == previous) && (state == DPT_BLINK_INDICATOR))
2175 result = dpt_inb(dpt, 5);
2181 * Execute a command which did not come from the kernel's SCSI layer.
2182 * The only way to map user commands to bus and target is to comply with the
2183 * standard DPT wire-down scheme:
/*
 * dpt_user_cmd:
 *	ioctl back-end: decode channel/target/lun from the device minor,
 *	copy the user's EATA command packet into a fresh CCB, stage any data
 *	buffer (copyin for writes), handle the special BUS_QUIET/BUS_UNQUIET
 *	multifunction commands, then queue the CCB and sleep until
 *	dpt_user_cmd_done() completes it.  Called with the softc lock held.
 *	NOTE(review): elided listing — several returns, frees and the
 *	function tail are not visible here.
 */
2186 dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
2187 caddr_t cmdarg, int minor_no)
2191 int channel, target, lun;
2196 mtx_assert(&dpt->lock, MA_OWNED);
/* Addressing is wired down in the minor number. */
2198 channel = minor2hba(minor_no);
2199 target = minor2target(minor_no);
2200 lun = minor2lun(minor_no);
2202 if ((channel > (dpt->channels - 1))
2203 || (target > dpt->max_id)
2204 || (lun > dpt->max_lun))
/* Commands addressed to the HBA itself must carry the "EATA" signature. */
2207 if (target == dpt->sc_scsi_link[channel].adapter_targ) {
2208 /* This one is for the controller itself */
2209 if ((user_cmd->eataID[0] != 'E')
2210 || (user_cmd->eataID[1] != 'A')
2211 || (user_cmd->eataID[2] != 'T')
2212 || (user_cmd->eataID[3] != 'A')) {
2216 /* Get a DPT CCB, so we can prepare a command */
2218 /* Process the free list */
2219 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2220 device_printf(dpt->dev,
2221 "ERROR: Cannot allocate any more free CCB's.\n"
2222 " Please try later\n");
2225 /* Now grab the newest CCB */
2226 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2227 panic("%s: Got a NULL CCB from pop_free()\n",
2228 device_get_nameunit(dpt->dev));
2230 /* Clean up the leftover of the previous tenant */
2231 ccb->status = DPT_CCB_STATE_NEW;
2234 bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
2235 sizeof(eata_ccb_t));
2237 /* We do not want to do user specified scatter/gather. Why?? */
2238 if (ccb->eata_ccb.scatter == 1)
/* Force autosense and rebuild all DMA pointers; never trust user values. */
2241 ccb->eata_ccb.Auto_Req_Sen = 1;
2242 ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
/* NOTE(review): this sets the transfer length to sizeof(the 4-byte field),
 * not to the user's requested length — looks wrong; confirm. */
2243 ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
2244 ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
2245 ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
2246 ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
2247 ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;
2249 if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
2250 /* Data I/O is involved in this command. Alocate buffer */
/* contigmalloc for multi-page transfers, plain malloc otherwise. */
2251 if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
2252 data = contigmalloc(ccb->eata_ccb.cp_datalen,
2253 M_TEMP, M_WAITOK, 0, ~0,
2254 ccb->eata_ccb.cp_datalen,
2257 data = malloc(ccb->eata_ccb.cp_datalen, M_TEMP,
2262 device_printf(dpt->dev, "Cannot allocate %d bytes "
2263 "for EATA command\n",
2264 ccb->eata_ccb.cp_datalen);
2267 #define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
/* For host->device transfers, pull the payload in from user space. */
2268 if (ccb->eata_ccb.DataIn == 1) {
2269 if (copyin(usr_cmd_DMA,
2270 data, ccb->eata_ccb.cp_datalen) == -1)
2274 /* No data I/O involved here. Make sure the DPT knows that */
2275 ccb->eata_ccb.cp_datalen = 0;
2279 if (ccb->eata_ccb.FWNEST == 1)
2280 ccb->eata_ccb.FWNEST = 0;
2282 if (ccb->eata_ccb.cp_datalen != 0) {
2283 if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
2291 * We are required to quiet a SCSI bus.
2292 * since we do not queue comands on a bus basis,
2293 * we wait for ALL commands on a controller to complete.
2294 * In the mean time, sched_queue() will not schedule new commands.
2296 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2297 && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
2298 /* We wait for ALL traffic for this HBa to subside */
2299 dpt->state |= DPT_HA_QUIET;
/* Sleep (interruptibly, 100*hz ticks at a time) until the HBA drains. */
2301 while ((submitted = dpt->submitted_ccbs_count) != 0) {
2302 huh = mtx_sleep((void *) dpt, &dpt->lock,
2303 PCATCH | PRIBIO, "dptqt", 100 * hz);
2306 /* Wakeup call received */
2317 /* Resume normal operation */
2318 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2319 && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
2320 dpt->state &= ~DPT_HA_QUIET;
2323 * Schedule the command and submit it.
2324 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
2328 ccb->eata_ccb.Auto_Req_Sen = 1; /* We always want this feature */
2330 ccb->transaction_id = ++dpt->commands_processed;
2331 ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
2332 ccb->result = (u_int32_t) & cmdarg;
2335 #ifdef DPT_MEASURE_PERFORMANCE
2336 ++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd];
2337 ccb->command_started = microtime_now;
2339 dpt_Qadd_waiting(dpt, ccb);
2341 dpt_sched_queue(dpt);
2343 /* Wait for the command to complete */
2344 (void) mtx_sleep((void *) ccb, &dpt->lock, PCATCH | PRIBIO, "dptucw",
2347 /* Free allocated memory */
2355 dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2360 mtx_unlock(&dpt->lock);
2363 * If Auto Request Sense is on, copyout the sense struct
2365 #define usr_pckt_DMA (caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA)
2366 #define usr_pckt_len ntohl(ccb->eata_ccb.cp_datalen)
2367 if (ccb->eata_ccb.Auto_Req_Sen == 1) {
2368 if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA,
2369 sizeof(struct scsi_sense_data))) {
2370 mtx_lock(&dpt->lock);
2371 ccb->result = EFAULT;
2372 dpt_Qpush_free(dpt, ccb);
2377 /* If DataIn is on, copyout the data */
2378 if ((ccb->eata_ccb.DataIn == 1)
2379 && (ccb->status_packet.hba_stat == HA_NO_ERROR)) {
2380 if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) {
2381 mtx_lock(&dpt->lock);
2382 dpt_Qpush_free(dpt, ccb);
2383 ccb->result = EFAULT;
2389 /* Copyout the status */
2390 result = ccb->status_packet.hba_stat;
2391 cmd_arg = (caddr_t) ccb->result;
2393 if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) {
2394 mtx_lock(&dpt->lock);
2395 dpt_Qpush_free(dpt, ccb);
2396 ccb->result = EFAULT;
2400 mtx_lock(&dpt->lock);
2401 /* Put the CCB back in the freelist */
2402 ccb->state |= DPT_CCB_STATE_COMPLETED;
2403 dpt_Qpush_free(dpt, ccb);
2405 /* Free allocated memory */