2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1997 by Simon Shapiro
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
36 * dpt_scsi.c: SCSI dependent code for the DPT driver
38 * credits: Assisted by Mike Neuffer in the early low level DPT code
39 * Thanx to Mark Salyzyn of DPT for his assistance.
40 * Special thanx to Justin Gibbs for invaluable help in
41 * making this driver look and work like a FreeBSD component.
42 * Last but not least, many thanx to UCB and the FreeBSD
43 * team for creating and maintaining such a wonderful O/S.
45 * TODO: * Add ISA probe code.
46 * * Add driver-level RAID-0. This will allow interoperability with
47 * NiceTry, M$-Doze, Win-Dog, Slowlaris, etc., in recognizing RAID
48 * arrays that span controllers (Wow!).
55 #include <sys/param.h>
56 #include <sys/systm.h>
58 #include <sys/eventhandler.h>
59 #include <sys/malloc.h>
60 #include <sys/kernel.h>
64 #include <machine/bus.h>
66 #include <machine/resource.h>
71 #include <cam/cam_ccb.h>
72 #include <cam/cam_sim.h>
73 #include <cam/cam_xpt_sim.h>
74 #include <cam/cam_debug.h>
75 #include <cam/scsi/scsi_all.h>
76 #include <cam/scsi/scsi_message.h>
81 #include <dev/dpt/dpt.h>
83 /* dpt_isa.c, and dpt_pci.c need this in a central place */
84 devclass_t dpt_devclass;
86 #define microtime_now dpt_time_now()
/*
 * Controller register access helpers.  All port I/O goes through the
 * softc's io_res bus resource at io_offset.  The port/value macro
 * arguments are parenthesized so expression arguments (e.g. "base + reg")
 * expand safely (CERT PRE01-C); callers in this file pass simple tokens,
 * so expansion is unchanged for existing uses.
 */
88 #define dpt_inl(dpt, port) \
89 bus_read_4((dpt)->io_res, (dpt)->io_offset + (port))
90 #define dpt_inb(dpt, port) \
91 bus_read_1((dpt)->io_res, (dpt)->io_offset + (port))
92 #define dpt_outl(dpt, port, value) \
93 bus_write_4((dpt)->io_res, (dpt)->io_offset + (port), (value))
94 #define dpt_outb(dpt, port, value) \
95 bus_write_1((dpt)->io_res, (dpt)->io_offset + (port), (value))
98 * These will have to be setup by parameters passed at boot/load time. For
99 * performance reasons, we make them constants for the time being.
101 #define dpt_min_segs DPT_MAX_SEGS
102 #define dpt_max_segs DPT_MAX_SEGS
104 /* Definitions for our use of the SIM private CCB area */
105 #define ccb_dccb_ptr spriv_ptr0
106 #define ccb_dpt_ptr spriv_ptr1
/*
 * Forward declarations for the driver's private helpers.
 * NOTE(review): this listing is a sampled copy of the original file —
 * several continuation lines of these prototypes are missing; do not
 * edit signatures without the complete source.
 */
108 /* ================= Private Inline Function declarations ===================*/
109 static __inline int dpt_just_reset(dpt_softc_t * dpt);
110 static __inline int dpt_raid_busy(dpt_softc_t * dpt);
111 static __inline int dpt_wait(dpt_softc_t *dpt, u_int bits,
113 static __inline struct dpt_ccb* dptgetccb(struct dpt_softc *dpt);
114 static __inline void dptfreeccb(struct dpt_softc *dpt,
115 struct dpt_ccb *dccb);
116 static __inline bus_addr_t dptccbvtop(struct dpt_softc *dpt,
117 struct dpt_ccb *dccb);
119 static __inline int dpt_send_immediate(dpt_softc_t *dpt,
120 eata_ccb_t *cmd_block,
121 u_int32_t cmd_busaddr,
123 u_int ifc, u_int code,
126 /* ==================== Private Function declarations =======================*/
127 static void dptmapmem(void *arg, bus_dma_segment_t *segs,
128 int nseg, int error);
130 static struct sg_map_node*
131 dptallocsgmap(struct dpt_softc *dpt);
133 static int dptallocccbs(dpt_softc_t *dpt);
135 static int dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb,
136 u_int32_t dccb_busaddr, u_int size,
137 u_int page, u_int target, int extent);
138 static void dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb,
139 u_int32_t dccb_busaddr,
142 static void dpt_poll(struct cam_sim *sim);
143 static void dpt_intr_locked(dpt_softc_t *dpt);
145 static void dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
146 int nseg, int error);
148 static void dpt_action(struct cam_sim *sim, union ccb *ccb);
150 static int dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd,
151 u_int32_t cmd_busaddr,
152 u_int command, u_int retries,
153 u_int ifc, u_int code,
155 static void dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb,
156 union ccb *ccb, u_int hba_stat,
157 u_int scsi_stat, u_int32_t resid);
159 static void dpttimeout(void *arg);
160 static void dptshutdown(void *arg, int howto);
162 /* ================= Private Inline Function definitions ====================*/
/*
 * dpt_just_reset: test whether the HBA's status ports 2..5 read back
 * the signature "DPTH", which the board presents right after a reset.
 * NOTE(review): the return statements/braces are missing from this
 * sampled listing — presumably returns non-zero on match; confirm
 * against the full source.
 */
164 dpt_just_reset(dpt_softc_t * dpt)
166 if ((dpt_inb(dpt, 2) == 'D')
167 && (dpt_inb(dpt, 3) == 'P')
168 && (dpt_inb(dpt, 4) == 'T')
169 && (dpt_inb(dpt, 5) == 'H'))
/*
 * dpt_raid_busy: test whether ports 0..2 read back "DPT", the pattern
 * the firmware presents while RAID members are still spinning up.
 * NOTE(review): return statements are missing from this sampled listing.
 */
176 dpt_raid_busy(dpt_softc_t * dpt)
178 if ((dpt_inb(dpt, 0) == 'D')
179 && (dpt_inb(dpt, 1) == 'P')
180 && (dpt_inb(dpt, 2) == 'T'))
/*
 * dpt_wait: poll the auxiliary status register until the bits selected
 * by 'bits' reach 'state', bounded at 20000 iterations (~20ms per the
 * original comment).  Result/exit conditions are truncated from this
 * sampled listing.
 */
187 dpt_wait(dpt_softc_t *dpt, u_int bits, u_int state)
192 for (i = 0; i < 20000; i++) { /* wait 20ms for not busy */
193 c = dpt_inb(dpt, HA_RSTATUS) & bits;
/*
 * dptgetccb: pop a driver CCB off the softc free list; if the list is
 * empty and we are still under max_dccbs, grow the pool (allocation
 * path is truncated in this listing — it appears to call dptallocccbs
 * and retry the free list) and pop the new head.  Returns NULL when no
 * CCB can be produced.  Caller must hold dpt->lock.
 */
202 static __inline struct dpt_ccb*
203 dptgetccb(struct dpt_softc *dpt)
205 struct dpt_ccb* dccb;
208 mtx_assert(&dpt->lock, MA_OWNED);
209 if ((dccb = SLIST_FIRST(&dpt->free_dccb_list)) != NULL) {
210 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
212 } else if (dpt->total_dccbs < dpt->max_dccbs) {
/* Pool can still grow: refill then retry the free list. */
214 dccb = SLIST_FIRST(&dpt->free_dccb_list);
216 device_printf(dpt->dev, "Can't malloc DCCB\n");
218 SLIST_REMOVE_HEAD(&dpt->free_dccb_list, links);
/*
 * dptfreeccb: return a driver CCB to the free list.  If the CCB was
 * active, unlink its CAM CCB from the pending list.  Propagate the
 * SIMQ-release request to CAM: either the CCB itself was marked
 * DCCB_RELEASE_SIMQ, or we previously froze the SIMQ for a resource
 * shortage and this completion is the first chance to release it.
 * Caller must hold dpt->lock.
 */
227 dptfreeccb(struct dpt_softc *dpt, struct dpt_ccb *dccb)
231 mtx_assert(&dpt->lock, MA_OWNED);
232 if ((dccb->state & DCCB_ACTIVE) != 0)
233 LIST_REMOVE(&dccb->ccb->ccb_h, sim_links.le);
234 if ((dccb->state & DCCB_RELEASE_SIMQ) != 0)
235 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
236 else if (dpt->resource_shortage != 0
237 && (dccb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
238 dccb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
239 dpt->resource_shortage = FALSE;
241 dccb->state = DCCB_FREE;
242 SLIST_INSERT_HEAD(&dpt->free_dccb_list, dccb, links);
/*
 * dptccbvtop: translate a driver CCB's kernel virtual address to its
 * bus address, using the fact that all CCBs live in one contiguous
 * DMA-mapped array based at dpt_dccbs / dpt_ccb_busbase.
 */
246 static __inline bus_addr_t
247 dptccbvtop(struct dpt_softc *dpt, struct dpt_ccb *dccb)
249 return (dpt->dpt_ccb_busbase
250 + (u_int32_t)((caddr_t)dccb - (caddr_t)dpt->dpt_dccbs));
/*
 * dptccbptov: inverse of dptccbvtop — translate a CCB bus address back
 * to the kernel virtual CCB pointer.  The casts perform scaled pointer
 * arithmetic (difference in whole struct dpt_ccb units).
 */
253 static __inline struct dpt_ccb *
254 dptccbptov(struct dpt_softc *dpt, bus_addr_t busaddr)
256 return (dpt->dpt_dccbs
257 + ((struct dpt_ccb *)busaddr
258 - (struct dpt_ccb *)dpt->dpt_ccb_busbase));
/*
 * dpt_send_immediate: thin wrapper that issues an EATA_CMD_IMMEDIATE
 * through dpt_send_eata_command; forwards retries/ifc/code[/code2]
 * unchanged (trailing argument line is truncated in this listing).
 */
262 * Send a command for immediate execution by the DPT
263 * See above function for IMPORTANT notes.
266 dpt_send_immediate(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
267 u_int32_t cmd_busaddr, u_int retries,
268 u_int ifc, u_int code, u_int code2)
270 return (dpt_send_eata_command(dpt, cmd_block, cmd_busaddr,
271 EATA_CMD_IMMEDIATE, retries, ifc,
276 /* ===================== Private Function definitions =======================*/
/*
 * dptmapmem: bus_dmamap_load callback.  Stores the first segment's bus
 * address into the bus_addr_t the caller passed as 'arg'.  Only valid
 * for single-segment loads; nseg/error are intentionally ignored here
 * (callers pass BUS_DMA_NOWAIT-style loads of one page).
 */
278 dptmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
280 bus_addr_t *busaddrp;
282 busaddrp = (bus_addr_t *)arg;
283 *busaddrp = segs->ds_addr;
/*
 * dptallocsgmap: allocate one page of DMA-able S/G space, load it to
 * get its bus address (dptmapmem fills sg_physaddr), and link the node
 * onto dpt->sg_maps.  Returns the node, or NULL on allocation failure
 * (the malloc-NULL check line is truncated in this listing).
 */
286 static struct sg_map_node *
287 dptallocsgmap(struct dpt_softc *dpt)
289 struct sg_map_node *sg_map;
291 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
296 /* Allocate S/G space for the next batch of CCBS */
297 if (bus_dmamem_alloc(dpt->sg_dmat, (void **)&sg_map->sg_vaddr,
298 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
299 free(sg_map, M_DEVBUF);
303 (void)bus_dmamap_load(dpt->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
304 PAGE_SIZE, dptmapmem, &sg_map->sg_physaddr,
307 SLIST_INSERT_HEAD(&dpt->sg_maps, sg_map, links);
/*
 * dptallocccbs: carve the next batch of driver CCBs out of the
 * preallocated contiguous CCB array and give each one a page-shared
 * slice of S/G space.  For the very first batch the S/G page allocated
 * during dpt_init is re-used; later batches allocate a fresh page.
 * DMA fields are stored big-endian (htonl) as the EATA firmware
 * expects.  Returns the number of CCBs added (return line truncated
 * from this listing).  Caller must hold dpt->lock.
 */
313 * Allocate another chunk of CCB's. Return count of entries added.
316 dptallocccbs(dpt_softc_t *dpt)
318 struct dpt_ccb *next_ccb;
319 struct sg_map_node *sg_map;
326 mtx_assert(&dpt->lock, MA_OWNED);
327 next_ccb = &dpt->dpt_dccbs[dpt->total_dccbs];
329 if (next_ccb == dpt->dpt_dccbs) {
331 * First time through. Re-use the S/G
332 * space we allocated for initialization
335 sg_map = SLIST_FIRST(&dpt->sg_maps);
337 sg_map = dptallocsgmap(dpt);
343 segs = sg_map->sg_vaddr;
344 physaddr = sg_map->sg_physaddr;
/* One page of S/G space is shared by 'newcount' CCBs. */
346 newcount = (PAGE_SIZE / (dpt->sgsize * sizeof(dpt_sg_t)));
347 for (i = 0; dpt->total_dccbs < dpt->max_dccbs && i < newcount; i++) {
350 error = bus_dmamap_create(dpt->buffer_dmat, /*flags*/0,
354 callout_init_mtx(&next_ccb->timer, &dpt->lock, 0);
355 next_ccb->sg_list = segs;
356 next_ccb->sg_busaddr = htonl(physaddr);
357 next_ccb->eata_ccb.cp_dataDMA = htonl(physaddr);
358 next_ccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
359 next_ccb->eata_ccb.cp_reqDMA =
360 htonl(dptccbvtop(dpt, next_ccb)
361 + offsetof(struct dpt_ccb, sense_data))
362 next_ccb->eata_ccb.cp_busaddr = dpt->dpt_ccb_busend;
363 next_ccb->state = DCCB_FREE;
364 next_ccb->tag = dpt->total_dccbs;
365 SLIST_INSERT_HEAD(&dpt->free_dccb_list, next_ccb, links);
367 physaddr += (dpt->sgsize * sizeof(dpt_sg_t));
368 dpt->dpt_ccb_busend += sizeof(*next_ccb);
/*
 * dpt_get_conf: read an EATA configuration page from the controller
 * into the data area that follows 'dccb'.  Builds an interpreted
 * INQUIRY-style EATA packet (page/target lines are truncated from this
 * listing), waits for the HBA to go ready, sends it with
 * EATA_CMD_DMA_SEND_CP, then polls up to ~2 seconds for completion.
 * Returns 0 on clean status, non-zero otherwise (exact error returns
 * truncated).  Caller must hold dpt->lock.
 */
376 * Read a configuration page into the supplied dpt_cont_t buffer.
379 dpt_get_conf(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
380 u_int size, u_int page, u_int target, int extent)
389 mtx_assert(&dpt->lock, MA_OWNED);
390 cp = &dccb->eata_ccb;
/* Zero the whole status packet before the firmware DMAs into it. */
391 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp));
395 cp->Auto_Req_Sen = 1;
396 cp->reqlen = sizeof(struct scsi_sense_data);
399 cp->cp_LUN = 0; /* In the EATA packet */
400 cp->cp_lun = 0; /* In the SCSI command */
402 cp->cp_scsi_cmd = INQUIRY;
405 cp->cp_extent = extent;
408 cp->cp_channel = 0; /* DNC, Interpret mode is set */
410 cp->cp_datalen = htonl(size);
413 * This could be a simple for loop, but we suspected the compiler To
414 * have optimized it a bit too much. Wait for the controller to
417 while (((status = dpt_inb(dpt, HA_RSTATUS)) != (HA_SREADY | HA_SSC)
418 && (status != (HA_SREADY | HA_SSC | HA_SERROR))
419 && (status != (HA_SDRDY | HA_SERROR | HA_SDRQ)))
420 || (dpt_wait(dpt, HA_SBUSY, 0))) {
423 * RAID Drives still Spinning up? (This should only occur if
424 * the DPT controller is in a NON PC (PCI?) platform).
426 if (dpt_raid_busy(dpt)) {
427 device_printf(dpt->dev,
428 "WARNING: Get_conf() RSUS failed.\n");
433 DptStat_Reset_BUSY(dpt->sp);
436 * XXXX We might want to do something more clever than aborting at
437 * this point, like resetting (rebooting) the controller and trying
440 if ((result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
441 EATA_CMD_DMA_SEND_CP,
442 10000, 0, 0, 0)) != 0) {
443 device_printf(dpt->dev,
444 "WARNING: Get_conf() failed (%d) to send "
445 "EATA_CMD_DMA_READ_CONFIG\n",
449 /* Wait for two seconds for a response. This can be slow */
452 && !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
457 /* Grab the status and clear interrupts */
458 status = dpt_inb(dpt, HA_RSTATUS)
461 * Check the status carefully. Return only if the
462 * command was successful.
464 if (((status & HA_SERROR) == 0)
465 && (dpt->sp->hba_stat == 0)
466 && (dpt->sp->scsi_stat == 0)
467 && (dpt->sp->residue_len == 0))
470 if (dpt->sp->scsi_stat == SCSI_STATUS_CHECK_COND)
476 /* Detect Cache parameters and size */
/*
 * dpt_detect_cache: issue a LOG SENSE (0x4d, page 0x33) through an
 * interpreted EATA packet and parse the returned hardware-capabilities
 * log page into dpt->cache_type / dpt->cache_size.  Defaults to
 * write-back caching if the query fails.  Caller must hold dpt->lock.
 *
 * FIX: the status packet was cleared with sizeof(dpt->sp) — the size of
 * the *pointer* (4/8 bytes) — leaving most of the dpt_sp_t stale.  Use
 * sizeof(*dpt->sp) to zero the whole packet, matching dpt_get_conf().
 */
478 dpt_detect_cache(dpt_softc_t *dpt, dpt_ccb_t *dccb, u_int32_t dccb_busaddr,
488 mtx_assert(&dpt->lock, MA_OWNED);
491 * Default setting, for best performance..
492 * This is what virtually all cards default to..
494 dpt->cache_type = DPT_CACHE_WRITEBACK;
497 cp = &dccb->eata_ccb;
498 bzero((void *)(uintptr_t)(volatile void *)dpt->sp, sizeof(*dpt->sp));
501 /* Setup the command structure */
504 cp->Auto_Req_Sen = 1;
505 cp->reqlen = sizeof(struct scsi_sense_data);
507 cp->cp_id = 0; /* who cares? The HBA will interpret.. */
508 cp->cp_LUN = 0; /* In the EATA packet */
509 cp->cp_lun = 0; /* In the SCSI command */
512 cp->cp_scsi_cmd = EATA_CMD_DMA_SEND_CP;
521 * Build the EATA Command Packet structure
522 * for a Log Sense Command.
524 cp->cp_cdb[0] = 0x4d;
526 cp->cp_cdb[2] = 0x40 | 0x33;
529 cp->cp_datalen = htonl(512);
531 result = dpt_send_eata_command(dpt, cp, dccb_busaddr,
532 EATA_CMD_DMA_SEND_CP,
535 device_printf(dpt->dev,
536 "WARNING: detect_cache() failed (%d) to send "
537 "EATA_CMD_DMA_SEND_CP\n", result);
540 /* Wait for two seconds for a response. This can be slow... */
543 !((status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ);
548 /* Grab the status and clear interrupts */
549 status = dpt_inb(dpt, HA_RSTATUS);
/* Byte 0 of the returned page must echo the requested page code. */
554 if (buff[0] != 0x33) {
557 bytes = DPT_HCP_LENGTH(buff);
558 param = DPT_HCP_FIRST(buff);
560 if (DPT_HCP_CODE(param) != 1) {
562 * DPT Log Page layout error
564 device_printf(dpt->dev, "NOTICE: Log Page (1) layout error\n");
567 if (!(param[4] & 0x4)) {
568 dpt->cache_type = DPT_NO_CACHE;
/* Walk parameters until we find code 6 (cache configuration). */
571 while (DPT_HCP_CODE(param) != 6) {
572 param = DPT_HCP_NEXT(param);
574 || (param >= &buff[bytes])) {
579 if (param[4] & 0x2) {
583 dpt->cache_type = DPT_NO_CACHE;
587 if (param[4] & 0x4) {
588 dpt->cache_type = DPT_CACHE_WRITETHROUGH;
591 /* XXX This isn't correct. This log parameter only has two bytes.... */
593 dpt->cache_size = param[5]
/*
 * dpt_poll: CAM SIM poll entry point — service the controller once by
 * running the interrupt handler body (SIM lock already held by CAM).
 */
601 dpt_poll(struct cam_sim *sim)
603 dpt_intr_locked(cam_sim_softc(sim));
/*
 * dptexecuteccb: bus_dmamap_load_ccb callback.  Translates the DMA
 * segment list into the CCB's EATA S/G list (big-endian, as the
 * firmware expects), syncs the buffer map, queues the CCB on the
 * pending list, arms the per-CCB timeout, and fires the command at the
 * HBA with EATA_CMD_DMA_SEND_CP.  On mapping error the request is
 * failed with CAM_REQ_TOO_BIG and the device queue frozen.
 *
 * FIX: corrected the typo "Unexepected" in the error message.
 * NOTE(review): several lines (ccb assignment, loop increments,
 * single-segment branch header) are missing from this sampled listing.
 */
607 dptexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
609 struct dpt_ccb *dccb;
611 struct dpt_softc *dpt;
613 dccb = (struct dpt_ccb *)arg;
615 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
617 mtx_assert(&dpt->lock, MA_OWNED);
621 device_printf(dpt->dev,
622 "Unexpected error 0x%x returned from "
623 "bus_dmamap_load\n", error);
624 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
625 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
626 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
628 dptfreeccb(dpt, dccb);
635 bus_dma_segment_t *end_seg;
638 end_seg = dm_segs + nseg;
640 /* Copy the segments into our SG list */
642 while (dm_segs < end_seg) {
643 sg->seg_len = htonl(dm_segs->ds_len);
644 sg->seg_addr = htonl(dm_segs->ds_addr);
650 dccb->eata_ccb.scatter = 1;
651 dccb->eata_ccb.cp_dataDMA = dccb->sg_busaddr;
652 dccb->eata_ccb.cp_datalen =
653 htonl(nseg * sizeof(dpt_sg_t));
655 dccb->eata_ccb.cp_dataDMA = dccb->sg_list[0].seg_addr;
656 dccb->eata_ccb.cp_datalen = dccb->sg_list[0].seg_len;
659 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
660 op = BUS_DMASYNC_PREREAD;
662 op = BUS_DMASYNC_PREWRITE;
664 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
667 dccb->eata_ccb.cp_dataDMA = 0;
668 dccb->eata_ccb.cp_datalen = 0;
672 * Last time we need to check if this CCB needs to
675 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
677 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
678 dptfreeccb(dpt, dccb);
683 dccb->state |= DCCB_ACTIVE;
684 ccb->ccb_h.status |= CAM_SIM_QUEUED;
685 LIST_INSERT_HEAD(&dpt->pending_ccb_list, &ccb->ccb_h, sim_links.le);
686 callout_reset_sbt(&dccb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
687 dpttimeout, dccb, 0);
688 if (dpt_send_eata_command(dpt, &dccb->eata_ccb,
689 dccb->eata_ccb.cp_busaddr,
690 EATA_CMD_DMA_SEND_CP, 0, 0, 0, 0) != 0) {
691 ccb->ccb_h.status = CAM_NO_HBA; /* HBA dead or just busy?? */
693 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
694 dptfreeccb(dpt, dccb);
/*
 * dpt_action: CAM SIM action entry point.  Dispatches on func_code:
 * XPT_SCSI_IO builds an EATA packet from the CAM CCB and hands it to
 * dptexecuteccb (directly or via bus_dmamap_load_ccb); the remaining
 * cases answer transfer-settings, geometry, bus-reset and path-inquiry
 * requests synchronously.  Called with dpt->lock held (SIM lock).
 * NOTE(review): this sampled listing is missing many lines (break/done
 * statements, csio/ccbh assignments, several flag lines).
 */
700 dpt_action(struct cam_sim *sim, union ccb *ccb)
702 struct dpt_softc *dpt;
704 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("dpt_action\n"));
706 dpt = (struct dpt_softc *)cam_sim_softc(sim);
707 mtx_assert(&dpt->lock, MA_OWNED);
/* Refuse new work once a shutdown has begun. */
709 if ((dpt->state & DPT_HA_SHUTDOWN_ACTIVE) != 0) {
710 xpt_print_path(ccb->ccb_h.path);
711 printf("controller is shutdown. Aborting CCB.\n");
712 ccb->ccb_h.status = CAM_NO_HBA;
717 switch (ccb->ccb_h.func_code) {
718 /* Common cases first */
719 case XPT_SCSI_IO: /* Execute the requested I/O operation */
721 struct ccb_scsiio *csio;
722 struct ccb_hdr *ccbh;
723 struct dpt_ccb *dccb;
724 struct eata_ccb *eccb;
728 /* Max CDB length is 12 bytes */
729 if (csio->cdb_len > 12) {
730 ccb->ccb_h.status = CAM_REQ_INVALID;
734 if ((dccb = dptgetccb(dpt)) == NULL) {
735 dpt->resource_shortage = 1;
736 xpt_freeze_simq(sim, /*count*/1);
737 ccb->ccb_h.status = CAM_REQUEUE_REQ;
741 eccb = &dccb->eata_ccb;
743 /* Link dccb and ccb so we can find one from the other */
745 ccb->ccb_h.ccb_dccb_ptr = dccb;
746 ccb->ccb_h.ccb_dpt_ptr = dpt;
749 * Explicitly set all flags so that the compiler can
750 * be smart about setting them.
752 eccb->SCSI_Reset = 0;
754 eccb->Auto_Req_Sen = (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE)
759 ccb->ccb_h.target_id == dpt->hostid[cam_sim_bus(sim)]
761 eccb->DataOut = (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1 : 0;
762 eccb->DataIn = (ccb->ccb_h.flags & CAM_DIR_IN) ? 1 : 0;
763 eccb->reqlen = csio->sense_len;
764 eccb->cp_id = ccb->ccb_h.target_id;
765 eccb->cp_channel = cam_sim_bus(sim);
766 eccb->cp_LUN = ccb->ccb_h.target_lun;
768 eccb->cp_dispri = (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
770 eccb->cp_identify = 1;
772 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
773 && csio->tag_action != CAM_TAG_ACTION_NONE) {
774 eccb->cp_msg[0] = csio->tag_action;
775 eccb->cp_msg[1] = dccb->tag;
782 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
783 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
784 bcopy(csio->cdb_io.cdb_ptr,
785 eccb->cp_cdb, csio->cdb_len);
787 /* I guess I could map it in... */
788 ccb->ccb_h.status = CAM_REQ_INVALID;
789 dptfreeccb(dpt, dccb);
794 bcopy(csio->cdb_io.cdb_bytes,
795 eccb->cp_cdb, csio->cdb_len);
798 * If we have any data to send with this command,
799 * map it into bus space.
801 /* Only use S/G if there is a transfer */
802 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
805 error = bus_dmamap_load_ccb(dpt->buffer_dmat,
810 if (error == EINPROGRESS) {
812 * So as to maintain ordering,
813 * freeze the controller queue
814 * until our mapping is
817 xpt_freeze_simq(sim, 1);
/*
 * NOTE(review): CAM_RELEASE_SIMQ is a CAM status flag but is ORed
 * into dccb->state, which elsewhere uses DCCB_* flags (dptfreeccb
 * tests DCCB_RELEASE_SIMQ).  Looks like it should be
 * DCCB_RELEASE_SIMQ — confirm against dpt.h flag values.
 */
818 dccb->state |= CAM_RELEASE_SIMQ;
823 * Does it want them both on or both off?
824 * CAM_DIR_NONE is both on, so this code can
825 * be removed if this is also what the DPT
830 dptexecuteccb(dccb, NULL, 0, 0);
834 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
835 case XPT_ABORT: /* Abort the specified CCB */
837 ccb->ccb_h.status = CAM_REQ_INVALID;
840 case XPT_SET_TRAN_SETTINGS:
842 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
846 case XPT_GET_TRAN_SETTINGS:
847 /* Get default/user set transfer settings for the target */
849 struct ccb_trans_settings *cts = &ccb->cts;
850 struct ccb_trans_settings_scsi *scsi =
851 &cts->proto_specific.scsi;
852 struct ccb_trans_settings_spi *spi =
853 &cts->xport_specific.spi;
855 cts->protocol = PROTO_SCSI;
856 cts->protocol_version = SCSI_REV_2;
857 cts->transport = XPORT_SPI;
858 cts->transport_version = 2;
860 if (cts->type == CTS_TYPE_USER_SETTINGS) {
861 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
862 spi->bus_width = (dpt->max_id > 7)
863 ? MSG_EXT_WDTR_BUS_8_BIT
864 : MSG_EXT_WDTR_BUS_16_BIT;
865 spi->sync_period = 25; /* 10MHz */
866 if (spi->sync_period != 0)
867 spi->sync_offset = 15;
868 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
870 spi->valid = CTS_SPI_VALID_SYNC_RATE
871 | CTS_SPI_VALID_SYNC_OFFSET
872 | CTS_SPI_VALID_BUS_WIDTH
873 | CTS_SPI_VALID_DISC;
874 scsi->valid = CTS_SCSI_VALID_TQ;
875 ccb->ccb_h.status = CAM_REQ_CMP;
877 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
882 case XPT_CALC_GEOMETRY:
885 * XXX Use Adaptec translation until I find out how to
886 * get this information from the card.
888 cam_calc_geometry(&ccb->ccg, /*extended*/1);
892 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
895 ccb->ccb_h.status = CAM_REQ_CMP;
899 case XPT_TERM_IO: /* Terminate the I/O process */
901 ccb->ccb_h.status = CAM_REQ_INVALID;
904 case XPT_PATH_INQ: /* Path routing inquiry */
906 struct ccb_pathinq *cpi = &ccb->cpi;
908 cpi->version_num = 1;
909 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
911 cpi->hba_inquiry |= PI_WIDE_16;
912 cpi->target_sprt = 0;
914 cpi->hba_eng_cnt = 0;
915 cpi->max_target = dpt->max_id;
916 cpi->max_lun = dpt->max_lun;
917 cpi->initiator_id = dpt->hostid[cam_sim_bus(sim)];
918 cpi->bus_id = cam_sim_bus(sim);
919 cpi->base_transfer_speed = 3300;
920 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
921 strlcpy(cpi->hba_vid, "DPT", HBA_IDLEN);
922 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
923 cpi->unit_number = cam_sim_unit(sim);
924 cpi->transport = XPORT_SPI;
925 cpi->transport_version = 2;
926 cpi->protocol = PROTO_SCSI;
927 cpi->protocol_version = SCSI_REV_2;
928 cpi->ccb_h.status = CAM_REQ_CMP;
933 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * dpt_send_eata_command: hand one EATA command to the HBA.  Spins until
 * the auxiliary status register clears HA_ABUSY (bounded by 'retries'),
 * optionally records min/max spin counts under DPT_MEASURE_PERFORMANCE,
 * writes the command packet's bus address to the DMA address register,
 * then writes the command (plus immediate-mode code/code2/ifc bytes
 * when cmd_block is NULL).  Returns 0 on success, non-zero on failure.
 *
 * FIX (big-endian hosts only): the byte swap of cmd_busaddr masked every
 * shifted byte with 0xFF, collapsing all four bytes into bits 0-7 and
 * producing a garbage DMA address.  Replaced with a correct 32-bit byte
 * swap.  The little-endian build is unaffected (code is compiled out).
 */
940 * This routine will try to send an EATA command to the DPT HBA.
941 * It will, by default, try 20,000 times, waiting 50us between tries.
942 * It returns 0 on success and 1 on failure.
945 dpt_send_eata_command(dpt_softc_t *dpt, eata_ccb_t *cmd_block,
946 u_int32_t cmd_busaddr, u_int command, u_int retries,
947 u_int ifc, u_int code, u_int code2)
955 * I hate this polling nonsense. Wish there was a way to tell the DPT
956 * to go get commands at its own pace, or to interrupt when ready.
957 * In the mean time we will measure how many iterations it really
960 for (loop = 0; loop < retries; loop++) {
961 if ((dpt_inb(dpt, HA_RAUXSTAT) & HA_ABUSY) == 0)
967 if (loop < retries) {
968 #ifdef DPT_MEASURE_PERFORMANCE
969 if (loop > dpt->performance.max_eata_tries)
970 dpt->performance.max_eata_tries = loop;
972 if (loop < dpt->performance.min_eata_tries)
973 dpt->performance.min_eata_tries = loop;
976 #ifdef DPT_MEASURE_PERFORMANCE
977 ++dpt->performance.command_too_busy;
982 /* The controller is alive, advance the wedge timer */
984 dpt->last_contact = microtime_now;
987 if (cmd_block == NULL)
989 #if (BYTE_ORDER == BIG_ENDIAN)
/* The HBA wants the packet address little-endian: full byte swap. */
991 cmd_busaddr = ((cmd_busaddr >> 24) & 0xFF)
992 | ((cmd_busaddr >> 8) & 0xFF00)
993 | ((cmd_busaddr << 8) & 0xFF0000)
994 | ((cmd_busaddr << 24) & 0xFF000000);
997 /* And now the address */
998 dpt_outl(dpt, HA_WDMAADDR, cmd_busaddr);
1000 if (command == EATA_CMD_IMMEDIATE) {
1001 if (cmd_block == NULL) {
1002 dpt_outb(dpt, HA_WCODE2, code2);
1003 dpt_outb(dpt, HA_WCODE, code);
1005 dpt_outb(dpt, HA_WIFC, ifc);
1007 dpt_outb(dpt, HA_WCOMMAND, command);
1013 /* ==================== Exported Function definitions =======================*/
/*
 * dpt_alloc: one-time softc initialization at device allocation time —
 * set up the driver mutex, the free-CCB and pending-CCB lists, mark
 * every channel healthy, and (optionally) zero the performance
 * counters.
 */
1015 dpt_alloc(device_t dev)
1017 dpt_softc_t *dpt = device_get_softc(dev);
1020 mtx_init(&dpt->lock, "dpt", NULL, MTX_DEF);
1021 SLIST_INIT(&dpt->free_dccb_list);
1022 LIST_INIT(&dpt->pending_ccb_list);
1023 for (i = 0; i < MAX_CHANNELS; i++)
1024 dpt->resetlevel[i] = DPT_HA_OK;
1026 #ifdef DPT_MEASURE_PERFORMANCE
1027 dpt_reset_performance(dpt);
1028 #endif /* DPT_MEASURE_PERFORMANCE */
/*
 * dpt_free: tear down everything dpt_init built, in reverse order of
 * construction.  The switch on init_level falls through so that a
 * partially initialized softc only releases what it actually acquired.
 * Finishes by destroying the driver mutex.
 */
1033 dpt_free(struct dpt_softc *dpt)
1035 switch (dpt->init_level) {
1038 bus_dmamap_unload(dpt->dccb_dmat, dpt->dccb_dmamap);
1040 bus_dmamem_free(dpt->dccb_dmat, dpt->dpt_dccbs,
1043 bus_dma_tag_destroy(dpt->dccb_dmat);
1045 bus_dma_tag_destroy(dpt->buffer_dmat);
1048 struct sg_map_node *sg_map;
1050 while ((sg_map = SLIST_FIRST(&dpt->sg_maps)) != NULL) {
1051 SLIST_REMOVE_HEAD(&dpt->sg_maps, links);
1052 bus_dmamap_unload(dpt->sg_dmat,
1054 bus_dmamem_free(dpt->sg_dmat, sg_map->sg_vaddr,
1056 free(sg_map, M_DEVBUF);
1058 bus_dma_tag_destroy(dpt->sg_dmat);
1063 mtx_destroy(&dpt->lock);
/*
 * dpt_alloc_resources: claim the bus resources the attachment needs —
 * the I/O window (type/rid come from the bus front-end, dpt_isa/dpt_pci)
 * and the interrupt line.  Error-return lines are truncated from this
 * sampled listing.
 */
1067 dpt_alloc_resources (device_t dev)
1072 dpt = device_get_softc(dev);
1074 dpt->io_res = bus_alloc_resource_any(dev, dpt->io_type, &dpt->io_rid,
1076 if (dpt->io_res == NULL) {
1077 device_printf(dev, "No I/O space?!\n");
1082 dpt->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &dpt->irq_rid,
1084 if (dpt->irq_res == NULL) {
1085 device_printf(dev, "No IRQ!\n");
/*
 * dpt_release_resources: undo dpt_alloc_resources — tear down the
 * interrupt handler and release the I/O, IRQ and (ISA) DRQ resources.
 * The guarding NULL checks for each resource are truncated from this
 * sampled listing.
 */
1097 dpt_release_resources (device_t dev)
1099 struct dpt_softc * dpt;
1101 dpt = device_get_softc(dev);
1104 bus_teardown_intr(dev, dpt->irq_res, dpt->ih);
1106 bus_release_resource(dev, dpt->io_type, dpt->io_rid, dpt->io_res);
1108 bus_release_resource(dev, SYS_RES_IRQ, dpt->irq_rid, dpt->irq_res);
1110 bus_release_resource(dev, SYS_RES_DRQ, dpt->drq_rid, dpt->drq_res);
/*
 * Field widths of the fixed-size inquiry strings in dpt_inq_t, in the
 * order they are laid out in board_data.  dpt_init walks this table to
 * trim trailing spaces from each field in place.
 */
1115 static u_int8_t string_sizes[] =
1117 sizeof(((dpt_inq_t*)NULL)->vendor),
1118 sizeof(((dpt_inq_t*)NULL)->modelNum),
1119 sizeof(((dpt_inq_t*)NULL)->firmware),
1120 sizeof(((dpt_inq_t*)NULL)->protocol),
/*
 * dpt_init: bring the controller to an operational state.
 *  1. Optionally reset the board (DPT_RESET_BOARD).
 *  2. Create the S/G DMA tag and bootstrap one page of DMA memory,
 *     used as a temporary status packet + CCB + data area to solve the
 *     chicken-and-egg problem of querying the board before the real
 *     CCB pool exists.
 *  3. Fetch the EATA configuration page and inquiry data, detect the
 *     cache, and decode limits (max_id, max_lun, channels, queue
 *     depth, S/G size) into the softc.
 *  4. Create the buffer and CCB DMA tags, allocate/map/zero the
 *     contiguous CCB array plus trailing status packet, and allocate
 *     the first CCB batch.
 *  5. Trim inquiry strings and announce the board.
 * Returns 0 on success (error paths truncated from this sampled
 * listing).
 */
1124 dpt_init(struct dpt_softc *dpt)
1127 struct sg_map_node *sg_map;
1134 dpt->init_level = 0;
1135 SLIST_INIT(&dpt->sg_maps);
1136 mtx_lock(&dpt->lock);
1138 #ifdef DPT_RESET_BOARD
1139 device_printf(dpt->dev, "resetting HBA\n");
1140 dpt_outb(dpt, HA_WCOMMAND, EATA_CMD_RESET);
1142 /* XXX Shouldn't we poll a status register or something??? */
1144 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1145 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1148 /* lowaddr */ BUS_SPACE_MAXADDR,
1149 /* highaddr */ BUS_SPACE_MAXADDR,
1151 /* filterarg */ NULL,
1152 /* maxsize */ PAGE_SIZE,
1154 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1156 /* lockfunc */ NULL,
1158 &dpt->sg_dmat) != 0) {
1165 * We allocate our DPT ccbs as a contiguous array of bus dma'able
1166 * memory. To get the allocation size, we need to know how many
1167 * ccbs the card supports. This requires a ccb. We solve this
1168 * chicken and egg problem by allocating some re-usable S/G space
1169 * up front, and treating it as our status packet, CCB, and target
1170 * memory space for these commands.
1172 sg_map = dptallocsgmap(dpt);
/* Bootstrap layout in the page: [dpt_sp_t][dpt_ccb][data area]. */
1176 dpt->sp = (volatile dpt_sp_t *)sg_map->sg_vaddr;
1177 dccb = (struct dpt_ccb *)(uintptr_t)(volatile void *)&dpt->sp[1];
1178 bzero(dccb, sizeof(*dccb));
1179 dpt->sp_physaddr = sg_map->sg_physaddr;
1180 dccb->eata_ccb.cp_dataDMA =
1181 htonl(sg_map->sg_physaddr + sizeof(dpt_sp_t) + sizeof(*dccb));
1182 dccb->eata_ccb.cp_busaddr = ~0;
1183 dccb->eata_ccb.cp_statDMA = htonl(dpt->sp_physaddr);
1184 dccb->eata_ccb.cp_reqDMA = htonl(dpt->sp_physaddr + sizeof(*dccb)
1185 + offsetof(struct dpt_ccb, sense_data));
1187 /* Okay. Fetch our config */
1188 bzero(&dccb[1], sizeof(conf)); /* data area */
1189 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1190 sizeof(conf), 0xc1, 7, 1);
1193 device_printf(dpt->dev, "Failed to get board configuration\n");
1196 bcopy(&dccb[1], &conf, sizeof(conf));
1198 bzero(&dccb[1], sizeof(dpt->board_data));
1199 retval = dpt_get_conf(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1200 sizeof(dpt->board_data), 0, conf.scsi_id0, 0);
1202 device_printf(dpt->dev, "Failed to get inquiry information\n");
1205 bcopy(&dccb[1], &dpt->board_data, sizeof(dpt->board_data));
1207 dpt_detect_cache(dpt, dccb, sg_map->sg_physaddr + sizeof(dpt_sp_t),
1208 (u_int8_t *)&dccb[1]);
/* Status-packet length identifies the EATA revision of the firmware. */
1210 switch (ntohl(conf.splen)) {
1212 dpt->EATA_revision = 'a';
1215 dpt->EATA_revision = 'b';
1218 dpt->EATA_revision = 'c';
1221 dpt->EATA_revision = 'z';
1224 dpt->EATA_revision = '?';
1227 dpt->max_id = conf.MAX_ID;
1228 dpt->max_lun = conf.MAX_LUN;
1229 dpt->irq = conf.IRQ;
1230 dpt->dma_channel = (8 - conf.DMA_channel) & 7;
1231 dpt->channels = conf.MAX_CHAN + 1;
1232 dpt->state |= DPT_HA_OK;
1234 dpt->primary = FALSE;
1236 dpt->primary = TRUE;
1238 dpt->more_support = conf.MORE_support;
1240 if (strncmp(dpt->board_data.firmware, "07G0", 4) >= 0)
1241 dpt->immediate_support = 1;
1243 dpt->immediate_support = 0;
1245 dpt->cplen = ntohl(conf.cplen);
1246 dpt->cppadlen = ntohs(conf.cppadlen);
1247 dpt->max_dccbs = ntohs(conf.queuesiz);
/* Tags are a single byte (dccb->tag), so the queue caps at 256. */
1249 if (dpt->max_dccbs > 256) {
1250 device_printf(dpt->dev, "Max CCBs reduced from %d to "
1251 "256 due to tag algorithm\n", dpt->max_dccbs);
1252 dpt->max_dccbs = 256;
1255 dpt->hostid[0] = conf.scsi_id0;
1256 dpt->hostid[1] = conf.scsi_id1;
1257 dpt->hostid[2] = conf.scsi_id2;
1262 dpt->sgsize = ntohs(conf.SGsiz);
1264 /* We can only get 64k buffers, so don't bother to waste space. */
1265 if (dpt->sgsize < 17 || dpt->sgsize > 32)
1268 if (dpt->sgsize > dpt_max_segs)
1269 dpt->sgsize = dpt_max_segs;
1271 /* DMA tag for mapping buffers into device visible space. */
1272 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1275 /* lowaddr */ BUS_SPACE_MAXADDR,
1276 /* highaddr */ BUS_SPACE_MAXADDR,
1278 /* filterarg */ NULL,
1279 /* maxsize */ DFLTPHYS,
1280 /* nsegments */ dpt->sgsize,
1281 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1282 /* flags */ BUS_DMA_ALLOCNOW,
1283 /* lockfunc */ busdma_lock_mutex,
1284 /* lockarg */ &dpt->lock,
1285 &dpt->buffer_dmat) != 0) {
1286 device_printf(dpt->dev,
1287 "bus_dma_tag_create(...,dpt->buffer_dmat) failed\n");
1293 /* DMA tag for our ccb structures and interrupt status packet */
1294 if (bus_dma_tag_create( /* parent */ dpt->parent_dmat,
1297 /* lowaddr */ BUS_SPACE_MAXADDR,
1298 /* highaddr */ BUS_SPACE_MAXADDR,
1300 /* filterarg */ NULL,
1301 /* maxsize */ (dpt->max_dccbs *
1302 sizeof(struct dpt_ccb)) +
1305 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1307 /* lockfunc */ NULL,
1309 &dpt->dccb_dmat) != 0) {
1310 device_printf(dpt->dev,
1311 "bus_dma_tag_create(...,dpt->dccb_dmat) failed\n");
1317 /* Allocation for our ccbs and interrupt status packet */
1318 if (bus_dmamem_alloc(dpt->dccb_dmat, (void **)&dpt->dpt_dccbs,
1319 BUS_DMA_NOWAIT, &dpt->dccb_dmamap) != 0) {
1320 device_printf(dpt->dev,
1321 "bus_dmamem_alloc(dpt->dccb_dmat,...) failed\n");
1327 /* And permanently map them */
1328 bus_dmamap_load(dpt->dccb_dmat, dpt->dccb_dmamap,
1330 (dpt->max_dccbs * sizeof(struct dpt_ccb))
1332 dptmapmem, &dpt->dpt_ccb_busbase, /*flags*/0);
1334 /* Clear them out. */
1335 bzero(dpt->dpt_dccbs,
1336 (dpt->max_dccbs * sizeof(struct dpt_ccb)) + sizeof(dpt_sp_t));
1338 dpt->dpt_ccb_busend = dpt->dpt_ccb_busbase;
/* The real status packet lives just past the CCB array. */
1340 dpt->sp = (dpt_sp_t*)&dpt->dpt_dccbs[dpt->max_dccbs];
1341 dpt->sp_physaddr = dpt->dpt_ccb_busbase
1342 + (dpt->max_dccbs * sizeof(dpt_ccb_t));
1345 /* Allocate our first batch of ccbs */
1346 if (dptallocccbs(dpt) == 0) {
1347 device_printf(dpt->dev, "dptallocccbs(dpt) == 0\n");
1348 mtx_unlock(&dpt->lock);
1352 /* Prepare for Target Mode */
1353 dpt->target_mode_enabled = 1;
1355 /* Nuke excess spaces from inquiry information */
1356 strp = dpt->board_data.vendor;
1357 for (i = 0; i < sizeof(string_sizes); i++) {
1358 index = string_sizes[i] - 1;
1359 while (index && (strp[index] == ' '))
1360 strp[index--] = '\0';
1361 strp += string_sizes[i];
1364 device_printf(dpt->dev, "%.8s %.16s FW Rev. %.4s, ",
1365 dpt->board_data.vendor,
1366 dpt->board_data.modelNum, dpt->board_data.firmware);
1368 printf("%d channel%s, ", dpt->channels, dpt->channels > 1 ? "s" : "");
1370 if (dpt->cache_type != DPT_NO_CACHE
1371 && dpt->cache_size != 0) {
1372 printf("%s Cache, ",
1373 dpt->cache_type == DPT_CACHE_WRITETHROUGH
1374 ? "Write-Through" : "Write-Back");
1377 printf("%d CCBs\n", dpt->max_dccbs);
1378 mtx_unlock(&dpt->lock);
1382 mtx_unlock(&dpt->lock);
/*
 * dpt_attach: register the controller with CAM.  Allocates one shared
 * device queue sized to the CCB pool, then per channel: allocate a
 * SIM, register the bus with XPT, and create a wildcard path.  Each
 * failure path unwinds only its own channel (the devq is owned by the
 * first SIM, hence the /*free_devq*/i == 0 idiom).  Finally hook
 * dptshutdown into shutdown_final.
 */
1387 dpt_attach(dpt_softc_t *dpt)
1389 struct cam_devq *devq;
1393 * Create the device queue for our SIM.
1395 devq = cam_simq_alloc(dpt->max_dccbs);
1399 mtx_lock(&dpt->lock);
1400 for (i = 0; i < dpt->channels; i++) {
1402 * Construct our SIM entry
1404 dpt->sims[i] = cam_sim_alloc(dpt_action, dpt_poll, "dpt",
1405 dpt, device_get_unit(dpt->dev), &dpt->lock,
1407 /*tagged*/dpt->max_dccbs, devq);
1408 if (dpt->sims[i] == NULL) {
1410 cam_simq_free(devq);
1412 printf( "%s(): Unable to attach bus %d "
1413 "due to resource shortage\n",
1418 if (xpt_bus_register(dpt->sims[i], dpt->dev, i) != CAM_SUCCESS){
1419 cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1420 dpt->sims[i] = NULL;
1424 if (xpt_create_path(&dpt->paths[i], /*periph*/NULL,
1425 cam_sim_path(dpt->sims[i]),
1426 CAM_TARGET_WILDCARD,
1427 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1428 xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
1429 cam_sim_free(dpt->sims[i], /*free_devq*/i == 0);
1430 dpt->sims[i] = NULL;
1435 mtx_unlock(&dpt->lock);
1437 EVENTHANDLER_REGISTER(shutdown_final, dptshutdown,
1438 dpt, SHUTDOWN_PRI_DEFAULT);
/*
 * dpt_detach: device_detach method.
 * Tears down the CAM state created by dpt_attach (async AC_LOST_DEVICE,
 * free paths, deregister buses, free SIMs), warns the HBA of power loss
 * via dptshutdown(), then releases bus resources.
 * NOTE(review): lines are elided between the visible statements
 * (declarations, braces, return).
 */
1443 dpt_detach (device_t dev)
1445 struct dpt_softc * dpt;
1448 dpt = device_get_softc(dev);
1450 mtx_lock(&dpt->lock);
1451 for (i = 0; i < dpt->channels; i++) {
1453 xpt_async(AC_LOST_DEVICE, dpt->paths[i], NULL);
1455 xpt_free_path(dpt->paths[i]);
1456 xpt_bus_deregister(cam_sim_path(dpt->sims[i]));
/* free_devq TRUE on every iteration here, unlike attach's i == 0;
 * presumably safe because detach runs the loop to completion — verify. */
1457 cam_sim_free(dpt->sims[i], /*free_devq*/TRUE);
1459 mtx_unlock(&dpt->lock);
/* Reuse the shutdown path to flush/warn the controller. */
1461 dptshutdown((void *)dpt, SHUTDOWN_PRI_DEFAULT);
1463 dpt_release_resources(dev);
1471 * This is the interrupt handler for the DPT driver.
/* dpt_intr (header line elided in this extract): thin locked wrapper
 * around dpt_intr_locked(). */
1479 mtx_lock(&dpt->lock);
1480 dpt_intr_locked(dpt);
1481 mtx_unlock(&dpt->lock);
/*
 * dpt_intr_locked: interrupt service body; caller holds dpt->lock.
 * Loops while the aux status register reports a pending interrupt,
 * validates the hardware-written status packet, snapshots its fields,
 * clears the interrupt by reading HA_RSTATUS, syncs/unloads DMA maps,
 * and completes the CCB (fast path for HA_NO_ERROR, otherwise
 * dptprocesserror).
 * NOTE(review): several lines (declarations, braces, continue/break)
 * are elided between the visible statements of this extract.
 */
1485 dpt_intr_locked(dpt_softc_t *dpt)
1493 u_int32_t residue_len; /* Number of bytes not transferred */
1495 /* First order of business is to check if this interrupt is for us */
1496 while (((aux_status = dpt_inb(dpt, HA_RAUXSTAT)) & HA_AIRQ) != 0) {
1499 * What we want to do now, is to capture the status, all of it,
1500 * move it where it belongs, wake up whoever sleeps waiting to
1501 * process this result, and get out of here.
/* Sanity-check the bus address the HBA wrote back; a value outside
 * [busbase, busend) means a corrupt status packet. */
1503 if (dpt->sp->ccb_busaddr < dpt->dpt_ccb_busbase
1504 || dpt->sp->ccb_busaddr >= dpt->dpt_ccb_busend) {
1505 device_printf(dpt->dev,
1506 "Encountered bogus status packet\n");
/* Reading HA_RSTATUS acknowledges/clears the interrupt. */
1507 status = dpt_inb(dpt, HA_RSTATUS);
1511 dccb = dptccbptov(dpt, dpt->sp->ccb_busaddr);
/* Poison the field so a stale packet can't match a CCB twice. */
1513 dpt->sp->ccb_busaddr = ~0;
1515 /* Ignore status packets with EOC not set */
1516 if (dpt->sp->EOC == 0) {
1517 device_printf(dpt->dev,
1518 "ERROR: Request %d received with "
1519 "clear EOC.\n Marking as LOST.\n",
1520 dccb->transaction_id);
1522 /* This CLEARS the interrupt! */
1523 status = dpt_inb(dpt, HA_RSTATUS);
1529 * Double buffer the status information so the hardware can
1530 * work on updating the status packet while we decipher the
1531 * one we were just interrupted for.
1532 * According to Mark Salyzyn, we only need few pieces of it.
1534 hba_stat = dpt->sp->hba_stat;
1535 scsi_stat = dpt->sp->scsi_stat;
1536 residue_len = dpt->sp->residue_len;
1538 /* Clear interrupts, check for error */
1539 if ((status = dpt_inb(dpt, HA_RSTATUS)) & HA_SERROR) {
1541 * Error Condition. Check for magic cookie. Exit
1542 * this test on earliest sign of non-reset condition
1545 /* Check that this is not a board reset interrupt */
1546 if (dpt_just_reset(dpt)) {
1547 device_printf(dpt->dev, "HBA rebooted.\n"
1548 " All transactions should be "
1551 device_printf(dpt->dev,
1552 ">>---->> This is incomplete, "
1553 "fix me.... <<----<<");
/* Known-unfinished recovery path: panics instead of re-queuing. */
1554 panic("DPT Rebooted");
/* Normal completion from here: cancel the timeout, then sync and
 * unload the data DMA map according to transfer direction. */
1560 callout_stop(&dccb->timer);
1561 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1562 bus_dmasync_op_t op;
1564 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1565 op = BUS_DMASYNC_POSTREAD;
1567 op = BUS_DMASYNC_POSTWRITE;
1568 bus_dmamap_sync(dpt->buffer_dmat, dccb->dmamap, op);
1569 bus_dmamap_unload(dpt->buffer_dmat, dccb->dmamap);
1572 /* Common Case inline... */
1573 if (hba_stat == HA_NO_ERROR) {
1574 ccb->csio.scsi_status = scsi_stat;
1575 ccb->ccb_h.status = 0;
1576 switch (scsi_stat) {
1577 case SCSI_STATUS_OK:
1578 ccb->ccb_h.status |= CAM_REQ_CMP;
1580 case SCSI_STATUS_CHECK_COND:
1581 case SCSI_STATUS_CMD_TERMINATED:
/* Autosense data was collected by the HBA; hand it to CAM. */
1582 bcopy(&dccb->sense_data, &ccb->csio.sense_data,
1583 ccb->csio.sense_len);
1584 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1587 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1588 /* XXX Freeze DevQ */
1591 ccb->csio.resid = residue_len;
1592 dptfreeccb(dpt, dccb);
/* Error path: full status decode in dptprocesserror(). */
1595 dptprocesserror(dpt, dccb, ccb, hba_stat, scsi_stat,
/*
 * dptprocesserror: map an EATA controller (hba_stat) + SCSI status pair
 * onto the corresponding CAM ccb_h.status code, record the residual
 * byte count, and release the driver CCB.
 * NOTE(review): the switch(hba_stat) line itself and the break
 * statements are elided in this extract; case bodies below imply them.
 */
1602 dptprocesserror(dpt_softc_t *dpt, dpt_ccb_t *dccb, union ccb *ccb,
1603 u_int hba_stat, u_int scsi_stat, u_int32_t resid)
1605 ccb->csio.resid = resid;
1608 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1611 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1613 case HA_SCSIBUS_RESET:
1614 case HA_HBA_POWER_UP: /* Similar effect to a bus reset??? */
1615 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1618 case HA_CP_RESET: /* XXX ??? */
1619 case HA_CP_ABORT_NA: /* XXX ??? */
1620 case HA_CP_RESET_NA: /* XXX ??? */
/* Only mark aborted if no other completion status was set first. */
1621 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
1622 ccb->ccb_h.status = CAM_REQ_ABORTED;
1627 case HA_PCI_STABORT:
1631 ccb->ccb_h.status = CAM_UNCOR_PARITY;
1633 case HA_UNX_MSGRJCT:
1634 ccb->ccb_h.status = CAM_MSG_REJECT_REC;
1636 case HA_UNX_BUSPHASE:
1637 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1639 case HA_UNX_BUS_FREE:
1640 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1643 case HA_RESET_STUCK:
1645 * Dead??? Can the controller get unstuck
1646 * from these conditions
1648 ccb->ccb_h.status = CAM_NO_HBA;
1650 case HA_RSENSE_FAIL:
1651 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
/* Unknown hba_stat: report it and fail the request generically. */
1654 device_printf(dpt->dev, "Undocumented Error %x\n", hba_stat);
1655 printf("Please mail this message to shimon@simon-shapiro.org\n");
1656 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1659 dptfreeccb(dpt, dccb);
/*
 * dpttimeout: callout fired when a submitted CCB exceeds its deadline.
 * First runs the interrupt handler by hand (historically interrupts
 * were lost, leaving completed commands unharvested); if the CCB is
 * then no longer active it was a false alarm.  Otherwise issues an
 * EATA_SPECIFIC_ABORT for just this command and marks it timed out.
 * NOTE(review): lines are elided between the visible statements
 * (ccb assignment from dccb, braces, return).
 */
1664 dpttimeout(void *arg)
1666 struct dpt_ccb *dccb;
1668 struct dpt_softc *dpt;
1670 dccb = (struct dpt_ccb *)arg;
1672 dpt = (struct dpt_softc *)ccb->ccb_h.ccb_dpt_ptr;
1673 mtx_assert(&dpt->lock, MA_OWNED);
1674 xpt_print_path(ccb->ccb_h.path);
1675 printf("CCB %p - timed out\n", (void *)dccb);
1678 * Try to clear any pending jobs. FreeBSD will lose interrupts,
1679 * leaving the controller suspended, and commands timed-out.
1680 * By calling the interrupt handler, any command thus stuck will be
1683 dpt_intr_locked(dpt);
/* If the manual poll above completed it, this timeout is stale. */
1685 if ((dccb->state & DCCB_ACTIVE) == 0) {
1686 xpt_print_path(ccb->ccb_h.path);
1687 printf("CCB %p - timed out CCB already completed\n",
1692 /* Abort this particular command. Leave all others running */
1693 dpt_send_immediate(dpt, &dccb->eata_ccb, dccb->eata_ccb.cp_busaddr,
1694 /*retries*/20000, EATA_SPECIFIC_ABORT, 0, 0);
1695 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1699 * Shutdown the controller and ensure that the cache is completely flushed.
1700 * Called from the shutdown_final event after all disk access has completed.
/*
 * Sends the EATA "early power off warning" immediate command so the HBA
 * flushes its write-back cache, then busy-waits 5 seconds to let the
 * flush finish.  Also called directly from dpt_detach().
 */
1703 dptshutdown(void *arg, int howto)
1707 dpt = (dpt_softc_t *)arg;
1709 device_printf(dpt->dev,
1710 "Shutting down (mode %x) HBA. Please wait...\n", howto);
1713 * What we do for a shutdown, is give the DPT early power loss warning
1715 mtx_lock(&dpt->lock);
1716 dpt_send_immediate(dpt, NULL, 0, EATA_POWER_OFF_WARN, 0, 0, 0);
1717 mtx_unlock(&dpt->lock);
/* 5-second spin: give the controller time to flush its cache. */
1718 DELAY(1000 * 1000 * 5);
1719 device_printf(dpt->dev, "Controller was warned of shutdown and is now "
1723 /*============================================================================*/
1726 #ifdef DPT_RESET_HBA
1729 ** Function name : dpt_reset_hba
1731 ** Description : Reset the HBA and properly discard all pending work
/*
 * NOTE(review): this whole block is compiled only under DPT_RESET_HBA
 * and still uses the pre-CAM scsi_xfer interface — it appears to be
 * dead/bit-rotted code retained for reference.  Lines are elided
 * between the visible statements of this extract.
 * Flow: build an on-stack EATA RESET CCB, lock out the submitted
 * queue, send EATA_CMD_RESET (fall back to EATA_COLD_BOOT and panic if
 * even that fails), then abort every pending transaction so the OS
 * re-submits it.
 */
1736 dpt_reset_hba(dpt_softc_t *dpt)
1739 dpt_ccb_t dccb, *dccbp;
1741 struct scsi_xfer *xs;
1743 mtx_assert(&dpt->lock, MA_OWNED);
1745 /* Prepare a control block. The SCSI command part is immaterial */
1748 dccb.state = DPT_CCB_STATE_NEW;
1749 dccb.std_callback = NULL;
1750 dccb.wrbuff_callback = NULL;
1752 ccb = &dccb.eata_ccb;
1753 ccb->CP_OpCode = EATA_CMD_RESET;
1754 ccb->SCSI_Reset = 0;
1756 ccb->Auto_Req_Sen = 1;
1757 ccb->cp_id = 0; /* Should be ignored */
1761 ccb->reqlen = htonl(sizeof(struct scsi_sense_data));
/* Status/sense DMA pointers are self-referential placeholders here;
 * the HBA overwrites them — the command payload is immaterial. */
1762 ccb->cp_statDMA = htonl(vtophys(&ccb->cp_statDMA));
1763 ccb->cp_reqDMA = htonl(vtophys(&ccb->cp_reqDMA));
1764 ccb->cp_viraddr = (u_int32_t) & ccb;
1766 ccb->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1767 ccb->cp_scsi_cmd = 0; /* Should be ignored */
1769 /* Lock up the submitted queue. We are very persistent here */
1770 while (dpt->queue_status & DPT_SUBMITTED_QUEUE_ACTIVE) {
1774 dpt->queue_status |= DPT_SUBMITTED_QUEUE_ACTIVE;
1776 /* Send the RESET message */
1777 if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1778 EATA_CMD_RESET, 0, 0, 0, 0)) != 0) {
1779 device_printf(dpt->dev, "Failed to send the RESET message.\n"
1780 " Trying cold boot (ouch!)\n");
1783 if ((result = dpt_send_eata_command(dpt, &dccb.eata_ccb,
1784 EATA_COLD_BOOT, 0, 0,
/* NOTE(review): "Faild" typo below is inside the panic string
 * (runtime text) — left untouched in this comments-only pass. */
1786 panic("%s: Faild to cold boot the HBA\n",
1787 device_get_nameunit(dpt->dev));
1789 #ifdef DPT_MEASURE_PERFORMANCE
1790 dpt->performance.cold_boots++;
1791 #endif /* DPT_MEASURE_PERFORMANCE */
1794 #ifdef DPT_MEASURE_PERFORMANCE
1795 dpt->performance.warm_starts++;
1796 #endif /* DPT_MEASURE_PERFORMANCE */
1798 device_printf(dpt->dev,
1799 "Aborting pending requests. O/S should re-submit\n");
/* Drain the completed queue, failing each transaction back to the
 * kernel as a selection timeout so it gets re-issued. */
1801 while ((dccbp = TAILQ_FIRST(&dpt->completed_ccbs)) != NULL) {
1802 struct scsi_xfer *xs = dccbp->xs;
1804 /* Not all transactions have xs structs */
1806 /* Tell the kernel proper this did not complete well */
1807 xs->error |= XS_SELTIMEOUT;
1808 xs->flags |= SCSI_ITSDONE;
1812 dpt_Qremove_submitted(dpt, dccbp);
1814 /* Remember, Callbacks are NOT in the standard queue */
1815 if (dccbp->std_callback != NULL) {
1816 (dccbp->std_callback)(dpt, dccbp->eata_ccb.cp_channel,
1819 dpt_Qpush_free(dpt, dccbp);
1823 device_printf(dpt->dev, "reset done aborting all pending commands\n");
1824 dpt->queue_status &= ~DPT_SUBMITTED_QUEUE_ACTIVE;
1827 #endif /* DPT_RESET_HBA */
1830 * Build a Command Block for target mode READ/WRITE BUFFER,
1831 * with the ``sync'' bit ON.
1833 * Although the length and offset are 24 bit fields in the command, they cannot
1834 * exceed 8192 bytes, so we take them as short integers and check their range.
1835 * If they are senseless, we round them to zero offset, maximum length and
/*
 * dpt_target_ccb: fill in the EATA CCB and the 10-byte READ/WRITE
 * BUFFER CDB for a target-mode transfer, then build the scatter/gather
 * list over the pre-allocated rw_buffer for this (bus,target,lun).
 * Caller must hold dpt->lock.
 * NOTE(review): lines are elided between the visible statements
 * (local declarations, braces, several cp_* bit assignments).
 */
1840 dpt_target_ccb(dpt_softc_t * dpt, int bus, u_int8_t target, u_int8_t lun,
1841 dpt_ccb_t * ccb, int mode, u_int8_t command,
1842 u_int16_t length, u_int16_t offset)
1846 mtx_assert(&dpt->lock, MA_OWNED);
1847 if ((length + offset) > DPT_MAX_TARGET_MODE_BUFFER_SIZE) {
1848 device_printf(dpt->dev,
1849 "Length of %d, and offset of %d are wrong\n",
1851 length = DPT_MAX_TARGET_MODE_BUFFER_SIZE;
1856 ccb->state = DPT_CCB_STATE_NEW;
1857 ccb->std_callback = (ccb_callback) dpt_target_done;
1858 ccb->wrbuff_callback = NULL;
1860 cp = &ccb->eata_ccb;
1861 cp->CP_OpCode = EATA_CMD_DMA_SEND_CP;
1864 cp->Auto_Req_Sen = 1;
1869 cp->reqlen = htonl(sizeof(struct scsi_sense_data));
1870 cp->cp_statDMA = htonl(vtophys(&cp->cp_statDMA));
1871 cp->cp_reqDMA = htonl(vtophys(&cp->cp_reqDMA));
1872 cp->cp_viraddr = (u_int32_t) & ccb;
1874 cp->cp_msg[0] = HA_IDENTIFY_MSG | HA_DISCO_RECO;
1876 cp->cp_scsi_cmd = command;
1877 cp->cp_cdb[1] = (u_int8_t) (mode & SCSI_TM_MODE_MASK);
1878 cp->cp_lun = lun; /* Order is important here! */
1879 cp->cp_cdb[2] = 0x00; /* Buffer Id, only 1 :-( */
/* NOTE(review): cdb[3..5] are documented as the buffer OFFSET but are
 * computed from `length`, not `offset` — the `offset` parameter is
 * never encoded into the CDB.  Looks like a copy/paste bug; cannot be
 * fixed safely in this fragmentary extract — verify against a full
 * copy of the file and the SCSI READ/WRITE BUFFER CDB layout. */
1880 cp->cp_cdb[3] = (length >> 16) & 0xFF; /* Buffer offset MSB */
1881 cp->cp_cdb[4] = (length >> 8) & 0xFF;
1882 cp->cp_cdb[5] = length & 0xFF;
1883 cp->cp_cdb[6] = (length >> 16) & 0xFF; /* Length MSB */
1884 cp->cp_cdb[7] = (length >> 8) & 0xFF;
1885 cp->cp_cdb[8] = length & 0xFF; /* Length LSB */
1886 cp->cp_cdb[9] = 0; /* No sync, no match bits */
1889 * This could be optimized to live in dpt_register_buffer.
1890 * We keep it here, just in case the kernel decides to reallocate pages
1892 if (dpt_scatter_gather(dpt, ccb, DPT_RW_BUFFER_SIZE,
1893 dpt->rw_buffer[bus][target][lun])) {
1894 device_printf(dpt->dev, "Failed to setup Scatter/Gather for "
1895 "Target-Mode buffer\n");
1899 /* Setup a target mode READ command */
/*
 * dpt_set_target: queue a target-mode READ BUFFER for (bus,target,lun).
 * When `redo` is set the CCB is re-issued as-is (see dpt_target_done);
 * otherwise dpt_target_ccb() rebuilds it first.  Caller holds dpt->lock.
 * NOTE(review): the if/else structure around `redo` is partially elided
 * in this extract.
 */
1902 dpt_set_target(int redo, dpt_softc_t * dpt,
1903 u_int8_t bus, u_int8_t target, u_int8_t lun, int mode,
1904 u_int16_t length, u_int16_t offset, dpt_ccb_t * ccb)
1907 mtx_assert(&dpt->lock, MA_OWNED);
1908 if (dpt->target_mode_enabled) {
1910 dpt_target_ccb(dpt, bus, target, lun, ccb, mode,
1911 SCSI_TM_READ_BUFFER, length, offset);
/* transaction_id doubles as a monotonically increasing sequence. */
1913 ccb->transaction_id = ++dpt->commands_processed;
1915 #ifdef DPT_MEASURE_PERFORMANCE
1916 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
1917 ccb->command_started = microtime_now;
1919 dpt_Qadd_waiting(dpt, ccb);
1920 dpt_sched_queue(dpt);
1922 device_printf(dpt->dev,
1923 "Target Mode Request, but Target Mode is OFF\n");
1928 * Schedule a buffer to be sent to another target.
1929 * The work will be scheduled and the callback provided will be called when
1930 * the work is actually done.
1932 * Please NOTE: ``Anyone'' can send a buffer, but only registered clients
1933 * get notified of receipt of buffers.
/*
 * dpt_send_buffer: external (non-CAM) entry point to push a buffer to a
 * registered target-mode peer.  Validates unit/channel/target/lun,
 * requires both rw_buffer and a registered receiver, grabs a free CCB,
 * and schedules a SCSI_TM_WRITE_BUFFER command.
 * Returns driver status codes (INVALID_UNIT, INVALID_SENDER,
 * NOT_REGISTERED, NO_RESOURCES, DRIVER_DOWN, or success).
 * NOTE(review): lines are elided between the visible statements.
 */
1937 dpt_send_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
1938 u_int8_t mode, u_int16_t length, u_int16_t offset, void *data,
1939 buff_wr_done callback)
1942 dpt_ccb_t *ccb = NULL;
1944 /* This is an external call. Be a bit paranoid */
1945 dpt = devclass_get_device(dpt_devclass, unit);
1947 return (INVALID_UNIT);
1949 mtx_lock(&dpt->lock);
1950 if (dpt->target_mode_enabled) {
1951 if ((channel >= dpt->channels) || (target > dpt->max_id) ||
1952 (lun > dpt->max_lun)) {
1953 mtx_unlock(&dpt->lock);
1954 return (INVALID_SENDER);
1956 if ((dpt->rw_buffer[channel][target][lun] == NULL) ||
1957 (dpt->buffer_receiver[channel][target][lun] == NULL)) {
1958 mtx_unlock(&dpt->lock);
1959 return (NOT_REGISTERED);
1962 /* Process the free list */
1963 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
1964 device_printf(dpt->dev,
1965 "ERROR: Cannot allocate any more free CCB's.\n"
1966 " Please try later\n");
1967 mtx_unlock(&dpt->lock);
1968 return (NO_RESOURCES);
1970 /* Now grab the newest CCB */
1971 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
1972 mtx_unlock(&dpt->lock);
1973 panic("%s: Got a NULL CCB from pop_free()\n",
1974 device_get_nameunit(dpt->dev));
/* NOTE(review): for a SEND, one would expect the caller's `data` to be
 * copied INTO rw_buffer; this bcopy copies FROM the buffer into `data`.
 * Suspect reversed arguments — confirm against bcopy(src, dst, len). */
1977 bcopy(dpt->rw_buffer[channel][target][lun] + offset, data, length);
1978 dpt_target_ccb(dpt, channel, target, lun, ccb, mode,
1979 SCSI_TM_WRITE_BUFFER,
1981 ccb->std_callback = (ccb_callback) callback; /* Potential trouble */
1983 ccb->transaction_id = ++dpt->commands_processed;
1985 #ifdef DPT_MEASURE_PERFORMANCE
1986 dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd]++;
1987 ccb->command_started = microtime_now;
1989 dpt_Qadd_waiting(dpt, ccb);
1990 dpt_sched_queue(dpt);
1992 mtx_unlock(&dpt->lock);
1995 mtx_unlock(&dpt->lock);
1996 return (DRIVER_DOWN);
/*
 * dpt_target_done: completion callback for target-mode CCBs.
 * Decodes the original CDB (via the br_* macros) to recover
 * channel/target/lun and the 24-bit offset/length, dispatches to the
 * registered receiver (READ) or the writer's callback (WRITE), then
 * re-arms the persistent READ BUFFER command and parks the CCB.
 * NOTE(review): lines are elided between visible statements (brace
 * closures, break statements, the user-sleep wakeup path at 2034+).
 */
2000 dpt_target_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2004 cp = &ccb->eata_ccb;
2007 * Remove the CCB from the waiting queue.
2008 * We do NOT put it back on the free, etc., queues as it is a special
2009 * ccb, owned by the dpt_softc of this unit.
2011 dpt_Qremove_completed(dpt, ccb);
/* Convenience accessors over the completed CCB's CDB bytes. */
2013 #define br_channel (ccb->eata_ccb.cp_channel)
2014 #define br_target (ccb->eata_ccb.cp_id)
2015 #define br_lun (ccb->eata_ccb.cp_LUN)
2016 #define br_index [br_channel][br_target][br_lun]
2017 #define read_buffer_callback (dpt->buffer_receiver br_index )
2018 #define read_buffer (dpt->rw_buffer[br_channel][br_target][br_lun])
2019 #define cb(offset) (ccb->eata_ccb.cp_cdb[offset])
2020 #define br_offset ((cb(3) << 16) | (cb(4) << 8) | cb(5))
2021 #define br_length ((cb(6) << 16) | (cb(7) << 8) | cb(8))
2023 /* Different reasons for being here, you know... */
2024 switch (ccb->eata_ccb.cp_scsi_cmd) {
2025 case SCSI_TM_READ_BUFFER:
2026 if (read_buffer_callback != NULL) {
2027 /* This is a buffer generated by a kernel process */
2028 read_buffer_callback(device_get_unit(dpt->dev),
2029 br_channel, br_target, br_lun,
2031 br_offset, br_length);
2034 * This is a buffer waited for by a user (sleeping)
2040 /* We ALWAYS re-issue the same command; args are don't-care */
2041 dpt_set_target(1, 0, 0, 0, 0, 0, 0, 0, 0);
2044 case SCSI_TM_WRITE_BUFFER:
2045 (ccb->wrbuff_callback) (device_get_unit(dpt->dev), br_channel,
2046 br_target, br_offset, br_length,
2047 br_lun, ccb->status_packet.hba_stat);
2050 device_printf(dpt->dev,
2051 "%s is an unsupported command for target mode\n",
2052 scsi_cmd_name(ccb->eata_ccb.cp_scsi_cmd));
2054 dpt->target_ccb[br_channel][br_target][br_lun] = NULL;
2055 dpt_Qpush_free(dpt, ccb);
2060 * Use this function to register a client for a buffer read target operation.
2061 * The function you register will be called every time a buffer is received
2062 * by the target mode code.
/*
 * dpt_register_buffer: register or unregister a target-mode receive
 * callback for (channel,target,lun).  On REGISTER: install callback,
 * allocate a dedicated CCB and a DPT_RW_BUFFER_SIZE buffer, then arm
 * the persistent READ BUFFER via dpt_set_target().  On unregister:
 * callback must match, then state is torn down.
 * Returns driver status codes (SUCCESSFULLY_REGISTERED,
 * ALREADY_REGISTERED, REGISTERED_TO_ANOTHER, INVALID_CALLBACK, ...).
 * NOTE(review): the bounds check here uses (max_id - 1)/(max_lun - 1)
 * while dpt_send_buffer uses max_id/max_lun — the two should agree.
 * Lines are elided between visible statements in this extract.
 */
2065 dpt_register_buffer(int unit, u_int8_t channel, u_int8_t target, u_int8_t lun,
2066 u_int8_t mode, u_int16_t length, u_int16_t offset,
2067 dpt_rec_buff callback, dpt_rb_op_t op)
2070 dpt_ccb_t *ccb = NULL;
2073 dpt = devclass_get_device(dpt_devclass, unit);
2075 return (INVALID_UNIT);
2076 mtx_lock(&dpt->lock);
2078 if (dpt->state & DPT_HA_SHUTDOWN_ACTIVE) {
2079 mtx_unlock(&dpt->lock);
2080 return (DRIVER_DOWN);
2083 if ((channel > (dpt->channels - 1)) || (target > (dpt->max_id - 1)) ||
2084 (lun > (dpt->max_lun - 1))) {
2085 mtx_unlock(&dpt->lock);
2086 return (INVALID_SENDER);
2089 if (dpt->buffer_receiver[channel][target][lun] == NULL) {
2090 if (op == REGISTER_BUFFER) {
2091 /* Assign the requested callback */
2092 dpt->buffer_receiver[channel][target][lun] = callback;
2095 /* Process the free list */
2096 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2097 device_printf(dpt->dev,
2098 "ERROR: Cannot allocate any more free CCB's.\n"
2099 " Please try later\n");
2100 mtx_unlock(&dpt->lock);
2101 return (NO_RESOURCES);
2103 /* Now grab the newest CCB */
2104 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2105 mtx_unlock(&dpt->lock);
2106 panic("%s: Got a NULL CCB from pop_free()\n",
2107 device_get_nameunit(dpt->dev));
2110 /* Clean up the leftover of the previous tenant */
2111 ccb->status = DPT_CCB_STATE_NEW;
2112 dpt->target_ccb[channel][target][lun] = ccb;
2114 dpt->rw_buffer[channel][target][lun] =
2115 malloc(DPT_RW_BUFFER_SIZE, M_DEVBUF, M_NOWAIT);
2116 if (dpt->rw_buffer[channel][target][lun] == NULL) {
2117 device_printf(dpt->dev, "Failed to allocate "
2118 "Target-Mode buffer\n");
2119 dpt_Qpush_free(dpt, ccb);
2120 mtx_unlock(&dpt->lock);
2121 return (NO_RESOURCES);
2123 dpt_set_target(0, dpt, channel, target, lun, mode,
2124 length, offset, ccb);
2125 mtx_unlock(&dpt->lock);
2126 return (SUCCESSFULLY_REGISTERED);
2128 mtx_unlock(&dpt->lock);
2129 return (NOT_REGISTERED);
2132 if (op == REGISTER_BUFFER) {
2133 if (dpt->buffer_receiver[channel][target][lun] == callback) {
2134 mtx_unlock(&dpt->lock);
2135 return (ALREADY_REGISTERED);
2137 mtx_unlock(&dpt->lock);
2138 return (REGISTERED_TO_ANOTHER);
2141 if (dpt->buffer_receiver[channel][target][lun] == callback) {
2142 dpt->buffer_receiver[channel][target][lun] = NULL;
/* NOTE(review): in this unregister branch the local `ccb` is still the
 * NULL it was initialized to; freeing the CCB stored in
 * dpt->target_ccb[channel][target][lun] looks intended — verify. */
2143 dpt_Qpush_free(dpt, ccb);
2144 free(dpt->rw_buffer[channel][target][lun], M_DEVBUF);
2145 mtx_unlock(&dpt->lock);
2146 return (SUCCESSFULLY_REGISTERED);
2148 mtx_unlock(&dpt->lock);
2149 return (INVALID_CALLBACK);
2154 mtx_unlock(&dpt->lock);
2157 /* Return the state of the blinking DPT LED's */
/*
 * dpt_blinking_led: sample the LED/status register up to 10 times
 * looking for a stable DPT_BLINK_INDICATOR pattern, and return the
 * blink code byte when found.  Caller holds dpt->lock.
 * NOTE(review): the loop guard requires state != previous, but both
 * are initialized to 0, so as written the loop body appears to never
 * execute and the result depends only on the initial values — suspect
 * the condition was meant to iterate until the reading stabilizes;
 * verify against a full copy of the file.
 */
2159 dpt_blinking_led(dpt_softc_t * dpt)
2166 mtx_assert(&dpt->lock, MA_OWNED);
2169 for (ndx = 0, state = 0, previous = 0;
2170 (ndx < 10) && (state != previous);
2173 state = dpt_inl(dpt, 1);
2176 if ((state == previous) && (state == DPT_BLINK_INDICATOR))
2177 result = dpt_inb(dpt, 5);
2183 * Execute a command which did not come from the kernel's SCSI layer.
2184 * The only way to map user commands to bus and target is to comply with the
2185 * standard DPT wire-down scheme:
/*
 * dpt_user_cmd: ioctl-style pass-through of a raw EATA command from
 * userland.  Decodes channel/target/lun from the minor number, copies
 * the user's EATA packet into a driver CCB, fixes up the DMA pointers,
 * stages a bounce buffer for any data phase (copyin for writes),
 * honors BUS_QUIET/BUS_UNQUIET multifunction commands by draining the
 * submitted queue, then queues the CCB and sleeps until
 * dpt_user_cmd_done() wakes it.  Caller holds dpt->lock.
 * NOTE(review): many lines (returns, braces, frees) are elided in this
 * extract; comments below describe only what is visible.
 */
2188 dpt_user_cmd(dpt_softc_t * dpt, eata_pt_t * user_cmd,
2189 caddr_t cmdarg, int minor_no)
2193 int channel, target, lun;
2198 mtx_assert(&dpt->lock, MA_OWNED);
/* Addressing comes from the device minor number (wire-down scheme). */
2200 channel = minor2hba(minor_no);
2201 target = minor2target(minor_no);
2202 lun = minor2lun(minor_no);
2204 if ((channel > (dpt->channels - 1))
2205 || (target > dpt->max_id)
2206 || (lun > dpt->max_lun))
2209 if (target == dpt->sc_scsi_link[channel].adapter_targ) {
2210 /* This one is for the controller itself */
/* Packets addressed at the HBA must carry the "EATA" signature. */
2211 if ((user_cmd->eataID[0] != 'E')
2212 || (user_cmd->eataID[1] != 'A')
2213 || (user_cmd->eataID[2] != 'T')
2214 || (user_cmd->eataID[3] != 'A')) {
2218 /* Get a DPT CCB, so we can prepare a command */
2220 /* Process the free list */
2221 if ((TAILQ_EMPTY(&dpt->free_ccbs)) && dpt_alloc_freelist(dpt)) {
2222 device_printf(dpt->dev,
2223 "ERROR: Cannot allocate any more free CCB's.\n"
2224 " Please try later\n");
2227 /* Now grab the newest CCB */
2228 if ((ccb = dpt_Qpop_free(dpt)) == NULL) {
2229 panic("%s: Got a NULL CCB from pop_free()\n",
2230 device_get_nameunit(dpt->dev));
2232 /* Clean up the leftover of the previous tenant */
2233 ccb->status = DPT_CCB_STATE_NEW;
2236 bcopy((caddr_t) & user_cmd->command_packet, (caddr_t) & ccb->eata_ccb,
2237 sizeof(eata_ccb_t));
2239 /* We do not want to do user specified scatter/gather. Why?? */
2240 if (ccb->eata_ccb.scatter == 1)
/* Driver always supplies autosense; user's DMA pointers are replaced
 * with kernel physical addresses. */
2243 ccb->eata_ccb.Auto_Req_Sen = 1;
2244 ccb->eata_ccb.reqlen = htonl(sizeof(struct scsi_sense_data));
2245 ccb->eata_ccb.cp_datalen = htonl(sizeof(ccb->eata_ccb.cp_datalen));
2246 ccb->eata_ccb.cp_dataDMA = htonl(vtophys(ccb->eata_ccb.cp_dataDMA));
2247 ccb->eata_ccb.cp_statDMA = htonl(vtophys(&ccb->eata_ccb.cp_statDMA));
2248 ccb->eata_ccb.cp_reqDMA = htonl(vtophys(&ccb->eata_ccb.cp_reqDMA));
2249 ccb->eata_ccb.cp_viraddr = (u_int32_t) & ccb;
2251 if (ccb->eata_ccb.DataIn || ccb->eata_ccb.DataOut) {
2252 /* Data I/O is involved in this command. Allocate buffer */
/* Large transfers need physically contiguous memory for the HBA. */
2253 if (ccb->eata_ccb.cp_datalen > PAGE_SIZE) {
2254 data = contigmalloc(ccb->eata_ccb.cp_datalen,
2255 M_TEMP, M_WAITOK, 0, ~0,
2256 ccb->eata_ccb.cp_datalen,
2259 data = malloc(ccb->eata_ccb.cp_datalen, M_TEMP,
2264 device_printf(dpt->dev, "Cannot allocate %d bytes "
2265 "for EATA command\n",
2266 ccb->eata_ccb.cp_datalen);
2269 #define usr_cmd_DMA (caddr_t)user_cmd->command_packet.cp_dataDMA
2270 if (ccb->eata_ccb.DataIn == 1) {
2271 if (copyin(usr_cmd_DMA,
2272 data, ccb->eata_ccb.cp_datalen) == -1)
2276 /* No data I/O involved here. Make sure the DPT knows that */
2277 ccb->eata_ccb.cp_datalen = 0;
/* Firmware-nesting is not supported through this path. */
2281 if (ccb->eata_ccb.FWNEST == 1)
2282 ccb->eata_ccb.FWNEST = 0;
2284 if (ccb->eata_ccb.cp_datalen != 0) {
2285 if (dpt_scatter_gather(dpt, ccb, ccb->eata_ccb.cp_datalen,
2293 * We are required to quiet a SCSI bus.
2294 * since we do not queue commands on a bus basis,
2295 * we wait for ALL commands on a controller to complete.
2296 * In the mean time, sched_queue() will not schedule new commands.
2298 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2299 && (ccb->eata_ccb.cp_cdb[2] == BUS_QUIET)) {
2300 /* We wait for ALL traffic for this HBA to subside */
2301 dpt->state |= DPT_HA_QUIET;
2303 while ((submitted = dpt->submitted_ccbs_count) != 0) {
2304 huh = mtx_sleep((void *) dpt, &dpt->lock,
2305 PCATCH | PRIBIO, "dptqt", 100 * hz);
2308 /* Wakeup call received */
2319 /* Resume normal operation */
2320 if ((ccb->eata_ccb.cp_cdb[0] == MULTIFUNCTION_CMD)
2321 && (ccb->eata_ccb.cp_cdb[2] == BUS_UNQUIET)) {
2322 dpt->state &= ~DPT_HA_QUIET;
2325 * Schedule the command and submit it.
2326 * We bypass dpt_sched_queue, as it will block on DPT_HA_QUIET
2330 ccb->eata_ccb.Auto_Req_Sen = 1; /* We always want this feature */
2332 ccb->transaction_id = ++dpt->commands_processed;
2333 ccb->std_callback = (ccb_callback) dpt_user_cmd_done;
/* The user's result pointer rides in ccb->result until completion. */
2334 ccb->result = (u_int32_t) & cmdarg;
2337 #ifdef DPT_MEASURE_PERFORMANCE
2338 ++dpt->performance.command_count[ccb->eata_ccb.cp_scsi_cmd];
2339 ccb->command_started = microtime_now;
2341 dpt_Qadd_waiting(dpt, ccb);
2343 dpt_sched_queue(dpt);
2345 /* Wait for the command to complete */
2346 (void) mtx_sleep((void *) ccb, &dpt->lock, PCATCH | PRIBIO, "dptucw",
2349 /* Free allocated memory */
/*
 * dpt_user_cmd_done: completion callback for dpt_user_cmd().
 * Drops the driver lock around the copyout()s (they may fault/sleep):
 * copies autosense data, then the DataIn payload, then the HBA status
 * back to userland; on any copyout failure records EFAULT and frees
 * the CCB.  The function continues past the end of this extract
 * (the final free/wakeup is not visible); elided lines also include
 * braces and returns between the visible statements.
 */
2357 dpt_user_cmd_done(dpt_softc_t * dpt, int bus, dpt_ccb_t * ccb)
2362 mtx_unlock(&dpt->lock);
2365 * If Auto Request Sense is on, copyout the sense struct
2367 #define usr_pckt_DMA (caddr_t)(intptr_t)ntohl(ccb->eata_ccb.cp_reqDMA)
2368 #define usr_pckt_len ntohl(ccb->eata_ccb.cp_datalen)
2369 if (ccb->eata_ccb.Auto_Req_Sen == 1) {
2370 if (copyout((caddr_t) & ccb->sense_data, usr_pckt_DMA,
2371 sizeof(struct scsi_sense_data))) {
2372 mtx_lock(&dpt->lock);
2373 ccb->result = EFAULT;
2374 dpt_Qpush_free(dpt, ccb);
2379 /* If DataIn is on, copyout the data */
2380 if ((ccb->eata_ccb.DataIn == 1)
2381 && (ccb->status_packet.hba_stat == HA_NO_ERROR)) {
2382 if (copyout(ccb->data, usr_pckt_DMA, usr_pckt_len)) {
2383 mtx_lock(&dpt->lock);
2384 dpt_Qpush_free(dpt, ccb);
2385 ccb->result = EFAULT;
2391 /* Copyout the status */
2392 result = ccb->status_packet.hba_stat;
/* ccb->result was loaded with the user's result pointer in
 * dpt_user_cmd(); round-trips through u_int32_t — 32-bit assumption. */
2393 cmd_arg = (caddr_t) ccb->result;
2395 if (copyout((caddr_t) & result, cmd_arg, sizeof(result))) {
2396 mtx_lock(&dpt->lock);
2397 dpt_Qpush_free(dpt, ccb);
2398 ccb->result = EFAULT;
2402 mtx_lock(&dpt->lock);
2403 /* Put the CCB back in the freelist */
2404 ccb->state |= DPT_CCB_STATE_COMPLETED;
2405 dpt_Qpush_free(dpt, ccb);
2407 /* Free allocated memory */