2 * CAM SCSI interface for the Advanced Systems Inc.
3 * Second Generation SCSI controllers.
5 * Product specific probe and attach routines can be found in:
7 * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W
9 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
11 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
12 * All rights reserved.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions, and the following disclaimer,
19 * without modification.
20 * 2. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
27 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
39 * Copyright (c) 1995-1998 Advanced System Products, Inc.
40 * All Rights Reserved.
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that redistributions of source
44 * code retain the above copyright notice and this comment without
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
51 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
57 #include <sys/module.h>
58 #include <sys/mutex.h>
61 #include <machine/bus.h>
62 #include <machine/resource.h>
67 #include <cam/cam_ccb.h>
68 #include <cam/cam_sim.h>
69 #include <cam/cam_xpt_sim.h>
70 #include <cam/cam_debug.h>
72 #include <cam/scsi/scsi_message.h>
74 #include <dev/advansys/adwvar.h>
76 /* Definitions for our use of the SIM private CCB area */
/*
 * spriv_ptr0 holds the ACB backing a CCB; spriv_ptr1 holds the owning
 * softc.  Both are set in adw_action() (XPT_SCSI_IO) and read back in
 * adwexecuteacb()/adwtimeout() to recover the softc from a CCB.
 */
77 #define ccb_acb_ptr spriv_ptr0
78 #define ccb_adw_ptr spriv_ptr1
/*
 * Forward declarations for this file's static helpers.
 * NOTE(review): this listing is elided; several prototypes are missing
 * their continuation lines (parameter lists split across lines).
 */
80 static __inline struct acb* adwgetacb(struct adw_softc *adw);
81 static __inline void adwfreeacb(struct adw_softc *adw,
84 static void adwmapmem(void *arg, bus_dma_segment_t *segs,
86 static struct sg_map_node*
87 adwallocsgmap(struct adw_softc *adw);
88 static int adwallocacbs(struct adw_softc *adw);
90 static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
92 static void adw_action(struct cam_sim *sim, union ccb *ccb);
93 static void adw_intr_locked(struct adw_softc *adw);
94 static void adw_poll(struct cam_sim *sim);
95 static void adw_async(void *callback_arg, u_int32_t code,
96 struct cam_path *path, void *arg);
97 static void adwprocesserror(struct adw_softc *adw, struct acb *acb);
98 static void adwtimeout(void *arg);
99 static void adw_handle_device_reset(struct adw_softc *adw,
101 static void adw_handle_bus_reset(struct adw_softc *adw,
/*
 * adwgetacb() - pop an ACB off the softc free list.  If the list is
 * empty but we are below max_acbs, grow the pool (allocation path is
 * elided from this listing) and retry the list head.  Must be called
 * with the adw lock held.  NOTE(review): return statement and the
 * pool-growing call are not visible here — confirm against full source.
 */
104 static __inline struct acb*
105 adwgetacb(struct adw_softc *adw)
110 mtx_assert(&adw->lock, MA_OWNED);
111 if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
112 SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
113 } else if (adw->num_acbs < adw->max_acbs) {
/* Pool grown above (elided); take the freshly added entry. */
115 acb = SLIST_FIRST(&adw->free_acb_list);
117 device_printf(adw->device, "Can't malloc ACB\n");
119 SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
/*
 * adwfreeacb() - return an ACB to the free list.  If the ACB was still
 * on the pending-CCB list, unlink it.  If a SIMQ release was requested
 * (either explicitly on this ACB or because the controller previously
 * flagged a resource shortage), mark the CCB so CAM releases the SIM
 * queue.  Must be called with the adw lock held.
 */
127 adwfreeacb(struct adw_softc *adw, struct acb *acb)
131 mtx_assert(&adw->lock, MA_OWNED);
132 if ((acb->state & ACB_ACTIVE) != 0)
133 LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
134 if ((acb->state & ACB_RELEASE_SIMQ) != 0)
135 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
136 else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
137 && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
/* First completion after a shortage releases the queue once. */
138 acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
139 adw->state &= ~ADW_RESOURCE_SHORTAGE;
141 acb->state = ACB_FREE;
142 SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
/*
 * adwmapmem() - bus_dmamap_load() callback: stash the bus address of
 * the first (and, for our single-segment loads, only) DMA segment into
 * the bus_addr_t the caller passed as arg.
 */
146 adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
148 bus_addr_t *busaddrp;
150 busaddrp = (bus_addr_t *)arg;
151 *busaddrp = segs->ds_addr;
/*
 * adwallocsgmap() - allocate one page of DMA-able scatter/gather block
 * storage, link the tracking node into adw->sg_maps, load the map to
 * obtain its bus address (adwmapmem callback), and zero the page.
 * Returns NULL on allocation failure (return paths elided here).
 */
154 static struct sg_map_node *
155 adwallocsgmap(struct adw_softc *adw)
157 struct sg_map_node *sg_map;
159 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);
164 /* Allocate S/G space for the next batch of ACBS */
165 if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
166 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
167 free(sg_map, M_DEVBUF);
171 SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);
173 bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
174 PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);
176 bzero(sg_map->sg_vaddr, PAGE_SIZE);
181 * Allocate another chunk of CCB's. Return count of entries added.
/*
 * adwallocacbs() - carve a fresh page of S/G blocks into per-ACB
 * chunks of ADW_SG_BLOCKCNT blocks each, initialize that many new
 * ACBs (DMA map, bus addresses, timer, state) and push them onto the
 * free list.  Capped by adw->max_acbs.
 */
184 adwallocacbs(struct adw_softc *adw)
186 struct acb *next_acb;
187 struct sg_map_node *sg_map;
189 struct adw_sg_block *blocks;
193 next_acb = &adw->acbs[adw->num_acbs];
194 sg_map = adwallocsgmap(adw);
199 blocks = sg_map->sg_vaddr;
200 busaddr = sg_map->sg_physaddr;
/* Number of ACBs this page of S/G blocks can service. */
202 newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
203 for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
206 error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
/* Precompute the bus addresses the firmware needs for this ACB. */
210 next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
211 next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
212 next_acb->queue.sense_baddr =
213 acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
214 next_acb->sg_blocks = blocks;
215 next_acb->sg_busaddr = busaddr;
216 next_acb->state = ACB_FREE;
217 callout_init_mtx(&next_acb->timer, &adw->lock, 0);
218 SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
219 blocks += ADW_SG_BLOCKCNT;
220 busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
228 adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
232 struct adw_softc *adw;
234 acb = (struct acb *)arg;
236 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
239 mtx_assert(&adw->lock, MA_OWNED);
242 device_printf(adw->device, "Unexepected error 0x%x "
243 "returned from bus_dmamap_load\n", error);
244 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
245 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
246 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
248 adwfreeacb(adw, acb);
256 acb->queue.data_addr = dm_segs[0].ds_addr;
257 acb->queue.data_cnt = ccb->csio.dxfer_len;
259 struct adw_sg_block *sg_block;
260 struct adw_sg_elm *sg;
261 bus_addr_t sg_busaddr;
263 bus_dma_segment_t *end_seg;
265 end_seg = dm_segs + nseg;
267 sg_busaddr = acb->sg_busaddr;
269 /* Copy the segments into our SG list */
270 for (sg_block = acb->sg_blocks;; sg_block++) {
273 sg = sg_block->sg_list;
274 for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
275 if (dm_segs >= end_seg)
278 sg->sg_addr = dm_segs->ds_addr;
279 sg->sg_count = dm_segs->ds_len;
283 sg_block->sg_cnt = i;
285 if (dm_segs == end_seg) {
286 sg_block->sg_busaddr_next = 0;
290 sizeof(struct adw_sg_block);
291 sg_block->sg_busaddr_next = sg_busaddr;
294 acb->queue.sg_real_addr = acb->sg_busaddr;
296 acb->queue.sg_real_addr = 0;
299 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
300 op = BUS_DMASYNC_PREREAD;
302 op = BUS_DMASYNC_PREWRITE;
304 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
307 acb->queue.data_addr = 0;
308 acb->queue.data_cnt = 0;
309 acb->queue.sg_real_addr = 0;
313 * Last time we need to check if this CCB needs to
316 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
318 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
319 adwfreeacb(adw, acb);
324 acb->state |= ACB_ACTIVE;
325 ccb->ccb_h.status |= CAM_SIM_QUEUED;
326 LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
327 callout_reset_sbt(&acb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
330 adw_send_acb(adw, acb, acbvtob(adw, acb));
/*
 * adw_action() - CAM SIM action entry point.  Dispatches on the CCB
 * function code: XPT_SCSI_IO (build an ACB and start the DMA load),
 * XPT_RESET_DEV (BDR via idle command), XPT_{SET,GET}_TRAN_SETTINGS
 * (sync/wide/disconnect/tag negotiation via LRAM), XPT_CALC_GEOMETRY,
 * XPT_RESET_BUS, XPT_PATH_INQ, and rejects the rest.  Called with the
 * adw lock held.
 * Fix: on the EINPROGRESS deferred-mapping path, the original stored
 * CAM_RELEASE_SIMQ (a CCB status flag) into acb->state, but
 * adwfreeacb() tests ACB_RELEASE_SIMQ, so the SIMQ freeze taken here
 * was never released through that path.  Use ACB_RELEASE_SIMQ.
 */
334 adw_action(struct cam_sim *sim, union ccb *ccb)
336 struct adw_softc *adw;
338 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));
340 adw = (struct adw_softc *)cam_sim_softc(sim);
342 mtx_assert(&adw->lock, MA_OWNED);
344 switch (ccb->ccb_h.func_code) {
345 /* Common cases first */
346 case XPT_SCSI_IO: /* Execute the requested I/O operation */
348 struct ccb_scsiio *csio;
354 /* Max supported CDB length is 12 bytes */
355 if (csio->cdb_len > 12) {
356 ccb->ccb_h.status = CAM_REQ_INVALID;
361 if ((acb = adwgetacb(adw)) == NULL) {
/* Out of ACBs: freeze the SIM queue and requeue the request. */
362 adw->state |= ADW_RESOURCE_SHORTAGE;
363 xpt_freeze_simq(sim, /*count*/1);
364 ccb->ccb_h.status = CAM_REQUEUE_REQ;
369 /* Link acb and ccb so we can find one from the other */
371 ccb->ccb_h.ccb_acb_ptr = acb;
372 ccb->ccb_h.ccb_adw_ptr = adw;
375 acb->queue.target_cmd = 0;
376 acb->queue.target_id = ccb->ccb_h.target_id;
377 acb->queue.target_lun = ccb->ccb_h.target_lun;
379 acb->queue.mflag = 0;
380 acb->queue.sense_len =
381 MIN(csio->sense_len, sizeof(acb->sense_data));
382 acb->queue.cdb_len = csio->cdb_len;
383 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
384 switch (csio->tag_action) {
385 case MSG_SIMPLE_Q_TAG:
386 acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
388 case MSG_HEAD_OF_Q_TAG:
389 acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
391 case MSG_ORDERED_Q_TAG:
392 acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
395 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
399 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
401 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
402 acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;
404 acb->queue.done_status = 0;
405 acb->queue.scsi_status = 0;
406 acb->queue.host_status = 0;
407 acb->queue.sg_wk_ix = 0;
408 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
409 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
410 bcopy(csio->cdb_io.cdb_ptr,
411 acb->queue.cdb, csio->cdb_len);
413 /* I guess I could map it in... */
414 ccb->ccb_h.status = CAM_REQ_INVALID;
415 adwfreeacb(adw, acb);
420 bcopy(csio->cdb_io.cdb_bytes,
421 acb->queue.cdb, csio->cdb_len);
424 error = bus_dmamap_load_ccb(adw->buffer_dmat,
429 if (error == EINPROGRESS) {
431 * So as to maintain ordering, freeze the controller
432 * queue until our mapping is returned.
434 xpt_freeze_simq(sim, 1);
/* Was CAM_RELEASE_SIMQ (wrong flag space); adwfreeacb() checks ACB_RELEASE_SIMQ. */
435 acb->state |= ACB_RELEASE_SIMQ;
439 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
441 adw_idle_cmd_status_t status;
443 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
444 ccb->ccb_h.target_id);
445 if (status == ADW_IDLE_CMD_SUCCESS) {
446 ccb->ccb_h.status = CAM_REQ_CMP;
448 xpt_print_path(ccb->ccb_h.path);
449 printf("BDR Delivered\n");
452 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
456 case XPT_ABORT: /* Abort the specified CCB */
458 ccb->ccb_h.status = CAM_REQ_INVALID;
461 case XPT_SET_TRAN_SETTINGS:
463 struct ccb_trans_settings_scsi *scsi;
464 struct ccb_trans_settings_spi *spi;
465 struct ccb_trans_settings *cts;
469 target_mask = 0x01 << ccb->ccb_h.target_id;
471 scsi = &cts->proto_specific.scsi;
472 spi = &cts->xport_specific.spi;
473 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
476 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
477 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
481 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);
483 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
484 discenb |= target_mask;
486 discenb &= ~target_mask;
488 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
492 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
494 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
495 adw->tagenb |= target_mask;
497 adw->tagenb &= ~target_mask;
500 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
506 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
507 wdtrenb = wdtrenb_orig;
508 wdtrdone = adw_lram_read_16(adw,
510 switch (spi->bus_width) {
511 case MSG_EXT_WDTR_BUS_32_BIT:
512 case MSG_EXT_WDTR_BUS_16_BIT:
513 wdtrenb |= target_mask;
515 case MSG_EXT_WDTR_BUS_8_BIT:
517 wdtrenb &= ~target_mask;
520 if (wdtrenb != wdtrenb_orig) {
521 adw_lram_write_16(adw,
524 wdtrdone &= ~target_mask;
525 adw_lram_write_16(adw,
528 /* Wide negotiation forces async */
529 sdtrdone &= ~target_mask;
530 adw_lram_write_16(adw,
536 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
537 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
543 sdtr = adw_get_chip_sdtr(adw,
544 ccb->ccb_h.target_id);
546 sdtrable = adw_lram_read_16(adw,
548 sdtrable_orig = sdtrable;
551 & CTS_SPI_VALID_SYNC_RATE) != 0) {
559 & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
560 if (spi->sync_offset == 0)
561 sdtr = ADW_MC_SDTR_ASYNC;
564 if (sdtr == ADW_MC_SDTR_ASYNC)
565 sdtrable &= ~target_mask;
567 sdtrable |= target_mask;
568 if (sdtr != sdtr_orig
569 || sdtrable != sdtrable_orig) {
570 adw_set_chip_sdtr(adw,
571 ccb->ccb_h.target_id,
573 sdtrdone &= ~target_mask;
574 adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
576 adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
582 ccb->ccb_h.status = CAM_REQ_CMP;
586 case XPT_GET_TRAN_SETTINGS:
587 /* Get default/user set transfer settings for the target */
589 struct ccb_trans_settings_scsi *scsi;
590 struct ccb_trans_settings_spi *spi;
591 struct ccb_trans_settings *cts;
595 target_mask = 0x01 << ccb->ccb_h.target_id;
596 cts->protocol = PROTO_SCSI;
597 cts->protocol_version = SCSI_REV_2;
598 cts->transport = XPORT_SPI;
599 cts->transport_version = 2;
601 scsi = &cts->proto_specific.scsi;
602 spi = &cts->xport_specific.spi;
603 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
607 if ((adw->user_discenb & target_mask) != 0)
608 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
610 if ((adw->user_tagenb & target_mask) != 0)
611 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
613 if ((adw->user_wdtr & target_mask) != 0)
614 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
616 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
618 mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
619 spi->sync_period = adw_find_period(adw, mc_sdtr);
620 if (spi->sync_period != 0)
621 spi->sync_offset = 15; /* XXX ??? */
623 spi->sync_offset = 0;
630 if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
632 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
634 if ((adw->tagenb & target_mask) != 0)
635 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
638 adw_lram_read_16(adw,
639 ADW_MC_DEVICE_HSHK_CFG_TABLE
640 + (2 * ccb->ccb_h.target_id));
642 if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
643 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
645 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
648 adw_hshk_cfg_period_factor(targ_tinfo);
650 spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
651 if (spi->sync_period == 0)
652 spi->sync_offset = 0;
654 if (spi->sync_offset == 0)
655 spi->sync_period = 0;
658 spi->valid = CTS_SPI_VALID_SYNC_RATE
659 | CTS_SPI_VALID_SYNC_OFFSET
660 | CTS_SPI_VALID_BUS_WIDTH
661 | CTS_SPI_VALID_DISC;
662 scsi->valid = CTS_SCSI_VALID_TQ;
663 ccb->ccb_h.status = CAM_REQ_CMP;
667 case XPT_CALC_GEOMETRY:
670 * XXX Use Adaptec translation until I find out how to
671 * get this information from the card.
673 cam_calc_geometry(&ccb->ccg, /*extended*/1);
677 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
681 failure = adw_reset_bus(adw);
683 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
686 xpt_print_path(adw->path);
687 printf("Bus Reset Delivered\n");
689 ccb->ccb_h.status = CAM_REQ_CMP;
694 case XPT_TERM_IO: /* Terminate the I/O process */
696 ccb->ccb_h.status = CAM_REQ_INVALID;
699 case XPT_PATH_INQ: /* Path routing inquiry */
701 struct ccb_pathinq *cpi = &ccb->cpi;
703 cpi->version_num = 1;
704 cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
705 cpi->target_sprt = 0;
707 cpi->hba_eng_cnt = 0;
708 cpi->max_target = ADW_MAX_TID;
709 cpi->max_lun = ADW_MAX_LUN;
710 cpi->initiator_id = adw->initiator_id;
711 cpi->bus_id = cam_sim_bus(sim);
712 cpi->base_transfer_speed = 3300;
713 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
714 strlcpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
715 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
716 cpi->unit_number = cam_sim_unit(sim);
717 cpi->transport = XPORT_SPI;
718 cpi->transport_version = 2;
719 cpi->protocol = PROTO_SCSI;
720 cpi->protocol_version = SCSI_REV_2;
721 cpi->ccb_h.status = CAM_REQ_CMP;
726 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * adw_poll() - CAM polling entry point; run the interrupt handler
 * body directly (lock is already held by the CAM polling path).
 */
733 adw_poll(struct cam_sim *sim)
735 adw_intr_locked(cam_sim_softc(sim));
739 adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
/*
 * adw_alloc() - initialize the softc for this device: empty the
 * pending-CCB and S/G map lists, create the driver mutex, and record
 * the register resource type/id.  (Remaining setup lines are elided.)
 */
744 adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
746 struct adw_softc *adw;
748 adw = device_get_softc(dev);
749 LIST_INIT(&adw->pending_ccbs);
750 SLIST_INIT(&adw->sg_maps);
751 mtx_init(&adw->lock, "adw", NULL, MTX_DEF);
753 adw->regs_res_type = regs_type;
754 adw->regs_res_id = regs_id;
/*
 * adw_free() - tear down in reverse order of adw_alloc()/adw_init(),
 * falling through the init_level cases: unload/free S/G maps, ACB
 * memory, carrier memory, DMA tags, then release register and IRQ
 * resources, deregister the CAM SIM/path, and destroy the mutex.
 */
760 adw_free(struct adw_softc *adw)
762 switch (adw->init_level) {
765 struct sg_map_node *sg_map;
767 while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
768 SLIST_REMOVE_HEAD(&adw->sg_maps, links);
769 bus_dmamap_unload(adw->sg_dmat,
771 bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
773 free(sg_map, M_DEVBUF);
775 bus_dma_tag_destroy(adw->sg_dmat);
778 bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
780 bus_dmamem_free(adw->acb_dmat, adw->acbs,
783 bus_dma_tag_destroy(adw->acb_dmat);
785 bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
787 bus_dmamem_free(adw->carrier_dmat, adw->carriers,
788 adw->carrier_dmamap);
790 bus_dma_tag_destroy(adw->carrier_dmat);
792 bus_dma_tag_destroy(adw->buffer_dmat);
794 bus_dma_tag_destroy(adw->parent_dmat);
799 if (adw->regs != NULL)
800 bus_release_resource(adw->device,
805 if (adw->irq != NULL)
806 bus_release_resource(adw->device,
810 if (adw->sim != NULL) {
811 if (adw->path != NULL) {
812 xpt_async(AC_LOST_DEVICE, adw->path, NULL);
813 xpt_free_path(adw->path);
815 xpt_bus_deregister(cam_sim_path(adw->sim));
816 cam_sim_free(adw->sim, /*free_devq*/TRUE);
818 mtx_destroy(&adw->lock);
/*
 * adw_init() - one-time controller initialization: read and validate
 * the EEPROM (restoring defaults on checksum failure, preserving the
 * board serial number), pull per-target sync/wide/tag/disconnect
 * settings into the softc, sanity-check the host queue depth, derive
 * the SCSI_CFG1 termination bits (LVD for Ultra2 chips, then SE),
 * create the buffer/carrier/ACB/S-G DMA tags and permanently mapped
 * allocations, build the free-carrier list (with a terminal carrier
 * whose next_ba is ~0), allocate the first batch of ACBs, and start
 * the chip via adw_init_chip().
 */
822 adw_init(struct adw_softc *adw)
824 struct adw_eeprom eep_config;
830 checksum = adw_eeprom_read(adw, &eep_config);
831 bcopy(eep_config.serial_number, adw->serial_number,
832 sizeof(adw->serial_number));
833 if (checksum != eep_config.checksum) {
834 u_int16_t serial_number[3];
836 adw->flags |= ADW_EEPROM_FAILED;
837 device_printf(adw->device,
838 "EEPROM checksum failed. Restoring Defaults\n");
841 * Restore the default EEPROM settings.
842 * Assume the 6 byte board serial number that was read
843 * from EEPROM is correct even if the EEPROM checksum
846 bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
847 bcopy(adw->serial_number, eep_config.serial_number,
848 sizeof(serial_number));
849 adw_eeprom_write(adw, &eep_config);
852 /* Pull eeprom information into our softc. */
853 adw->bios_ctrl = eep_config.bios_ctrl;
854 adw->user_wdtr = eep_config.wdtr_able;
855 for (tid = 0; tid < ADW_MAX_TID; tid++) {
859 tid_mask = 0x1 << tid;
860 if ((adw->features & ADW_ULTRA) != 0) {
862 * Ultra chips store sdtr and ultraenb
863 * bits in their seeprom, so we must
864 * construct valid mc_sdtr entries for
867 if (eep_config.sync1.sync_enable & tid_mask) {
868 if (eep_config.sync2.ultra_enable & tid_mask)
869 mc_sdtr = ADW_MC_SDTR_20;
871 mc_sdtr = ADW_MC_SDTR_10;
873 mc_sdtr = ADW_MC_SDTR_ASYNC;
875 switch (ADW_TARGET_GROUP(tid)) {
877 mc_sdtr = eep_config.sync4.sdtr4;
880 mc_sdtr = eep_config.sync3.sdtr3;
883 mc_sdtr = eep_config.sync2.sdtr2;
885 default: /* Shut up compiler */
887 mc_sdtr = eep_config.sync1.sdtr1;
890 mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
893 adw_set_user_sdtr(adw, tid, mc_sdtr);
895 adw->user_tagenb = eep_config.tagqng_able;
896 adw->user_discenb = eep_config.disc_enable;
897 adw->max_acbs = eep_config.max_host_qng;
898 adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);
901 * Sanity check the number of host openings.
903 if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
904 adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
905 else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
906 /* If the value is zero, assume it is uninitialized. */
907 if (adw->max_acbs == 0)
908 adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
910 adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
914 if ((adw->features & ADW_ULTRA2) != 0) {
915 switch (eep_config.termination_lvd) {
917 device_printf(adw->device,
918 "Invalid EEPROM LVD Termination Settings.\n");
919 device_printf(adw->device,
920 "Reverting to Automatic LVD Termination\n");
922 case ADW_EEPROM_TERM_AUTO:
924 case ADW_EEPROM_TERM_BOTH_ON:
925 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
927 case ADW_EEPROM_TERM_HIGH_ON:
928 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
930 case ADW_EEPROM_TERM_OFF:
931 scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
936 switch (eep_config.termination_se) {
938 device_printf(adw->device,
939 "Invalid SE EEPROM Termination Settings.\n");
940 device_printf(adw->device,
941 "Reverting to Automatic SE Termination\n");
943 case ADW_EEPROM_TERM_AUTO:
945 case ADW_EEPROM_TERM_BOTH_ON:
946 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
948 case ADW_EEPROM_TERM_HIGH_ON:
949 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
951 case ADW_EEPROM_TERM_OFF:
952 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
955 device_printf(adw->device, "SCSI ID %d, ", adw->initiator_id);
957 /* DMA tag for mapping buffers into device visible space. */
958 if (bus_dma_tag_create(
959 /* parent */ adw->parent_dmat,
962 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT,
963 /* highaddr */ BUS_SPACE_MAXADDR,
965 /* filterarg */ NULL,
966 /* maxsize */ DFLTPHYS,
967 /* nsegments */ ADW_SGSIZE,
968 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
969 /* flags */ BUS_DMA_ALLOCNOW,
970 /* lockfunc */ busdma_lock_mutex,
971 /* lockarg */ &adw->lock,
972 &adw->buffer_dmat) != 0) {
978 /* DMA tag for our ccb carrier structures */
979 if (bus_dma_tag_create(
980 /* parent */ adw->parent_dmat,
981 /* alignment */ 0x10,
983 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT,
984 /* highaddr */ BUS_SPACE_MAXADDR,
986 /* filterarg */ NULL,
987 /* maxsize */ (adw->max_acbs +
988 ADW_NUM_CARRIER_QUEUES + 1) *
989 sizeof(struct adw_carrier),
991 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
995 &adw->carrier_dmat) != 0) {
1001 /* Allocation for our ccb carrier structures */
1002 if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
1003 BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
1009 /* And permanently map them */
1010 bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
1012 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1013 * sizeof(struct adw_carrier),
1014 adwmapmem, &adw->carrier_busbase, /*flags*/0);
1016 /* Clear them out. */
1017 bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
1018 * sizeof(struct adw_carrier));
1020 /* Setup our free carrier list */
1021 adw->free_carriers = adw->carriers;
1022 for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
1023 adw->carriers[i].carr_offset =
1024 carriervtobo(adw, &adw->carriers[i]);
1025 adw->carriers[i].carr_ba =
1026 carriervtob(adw, &adw->carriers[i]);
1027 adw->carriers[i].areq_ba = 0;
1028 adw->carriers[i].next_ba =
1029 carriervtobo(adw, &adw->carriers[i+1]);
1031 /* Terminal carrier. Never leaves the freelist */
1032 adw->carriers[i].carr_offset =
1033 carriervtobo(adw, &adw->carriers[i]);
1034 adw->carriers[i].carr_ba =
1035 carriervtob(adw, &adw->carriers[i]);
1036 adw->carriers[i].areq_ba = 0;
1037 adw->carriers[i].next_ba = ~0;
1041 /* DMA tag for our acb structures */
1042 if (bus_dma_tag_create(
1043 /* parent */ adw->parent_dmat,
1046 /* lowaddr */ BUS_SPACE_MAXADDR,
1047 /* highaddr */ BUS_SPACE_MAXADDR,
1049 /* filterarg */ NULL,
1050 /* maxsize */ adw->max_acbs * sizeof(struct acb),
1052 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1054 /* lockfunc */ NULL,
1056 &adw->acb_dmat) != 0) {
1062 /* Allocation for our ccbs */
1063 if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
1064 BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
1069 /* And permanently map them */
1070 bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
1072 adw->max_acbs * sizeof(struct acb),
1073 adwmapmem, &adw->acb_busbase, /*flags*/0);
1075 /* Clear them out. */
1076 bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));
1078 /* DMA tag for our S/G structures. We allocate in page sized chunks */
1079 if (bus_dma_tag_create(
1080 /* parent */ adw->parent_dmat,
1083 /* lowaddr */ BUS_SPACE_MAXADDR,
1084 /* highaddr */ BUS_SPACE_MAXADDR,
1086 /* filterarg */ NULL,
1087 /* maxsize */ PAGE_SIZE,
1089 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
1091 /* lockfunc */ NULL,
1093 &adw->sg_dmat) != 0) {
1099 /* Allocate our first batch of ccbs */
1100 mtx_lock(&adw->lock);
1101 if (adwallocacbs(adw) == 0) {
1102 mtx_unlock(&adw->lock);
1106 if (adw_init_chip(adw, scsicfg1) != 0) {
1107 mtx_unlock(&adw->lock);
1111 printf("Queue Depth %d\n", adw->max_acbs);
1112 mtx_unlock(&adw->lock);
1118 * Attach all the sub-devices we can find
1121 adw_attach(struct adw_softc *adw)
1123 struct ccb_setasync csa;
1124 struct cam_devq *devq;
1127 /* Hook up our interrupt handler */
1128 error = bus_setup_intr(adw->device, adw->irq,
1129 INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, adw_intr, adw,
1132 device_printf(adw->device, "bus_setup_intr() failed: %d\n",
1137 /* Start the Risc processor now that we are fully configured. */
1138 adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);
1141 * Create the device queue for our SIM.
1143 devq = cam_simq_alloc(adw->max_acbs);
1148 * Construct our SIM entry.
1150 adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw,
1151 device_get_unit(adw->device), &adw->lock, 1, adw->max_acbs, devq);
1152 if (adw->sim == NULL)
1158 mtx_lock(&adw->lock);
1159 if (xpt_bus_register(adw->sim, adw->device, 0) != CAM_SUCCESS) {
1160 cam_sim_free(adw->sim, /*free devq*/TRUE);
1165 if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
1166 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
1168 xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
1169 csa.ccb_h.func_code = XPT_SASYNC_CB;
1170 csa.event_enable = AC_LOST_DEVICE;
1171 csa.callback = adw_async;
1172 csa.callback_arg = adw;
1173 xpt_action((union ccb *)&csa);
1176 gone_in_dev(adw->device, 12, "adw(4) driver");
1178 mtx_unlock(&adw->lock);
/*
 * NOTE(review): body of adw_intr() — the function header line is
 * missing from this elided listing.  It takes the adw lock and defers
 * all work to adw_intr_locked().
 */
1185 struct adw_softc *adw;
1188 mtx_lock(&adw->lock);
1189 adw_intr_locked(adw);
1190 mtx_unlock(&adw->lock);
/*
 * adw_intr_locked() - interrupt service body (lock held).  Bail if the
 * chip is not asserting an interrupt; reading ADW_INTR_STATUS_REG acks
 * it.  INTRB interrupts carry an async microcode event code (carrier
 * ready failure, bus reset detected/initiated, RDMA failure).  Then
 * drain completed carriers from the response queue: recover the ACB
 * from areq_ba, recycle the carrier, stop the timeout, post-sync and
 * unload the data map, and translate firmware status into CAM status
 * (CHECK CONDITION copies autosense data); anything unusual goes to
 * adwprocesserror().
 */
1194 adw_intr_locked(struct adw_softc *adw)
1198 if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
1201 /* Reading the register clears the interrupt. */
1202 int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);
1204 if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
1207 /* Async Microcode Event */
1208 intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
1209 switch (intrb_code) {
1210 case ADW_ASYNC_CARRIER_READY_FAILURE:
1212 * The RISC missed our update of
1215 if (LIST_FIRST(&adw->pending_ccbs) != NULL)
1216 adw_tickle_risc(adw, ADW_TICKLE_A);
1218 case ADW_ASYNC_SCSI_BUS_RESET_DET:
1220 * The firmware detected a SCSI Bus reset.
1222 device_printf(adw->device, "Someone Reset the Bus\n");
1223 adw_handle_bus_reset(adw, /*initiated*/FALSE);
1225 case ADW_ASYNC_RDMA_FAILURE:
1227 * Handle RDMA failure by resetting the
1228 * SCSI Bus and chip.
1231 AdvResetChipAndSB(adv_dvc_varp);
1235 case ADW_ASYNC_HOST_SCSI_BUS_RESET:
1237 * Host generated SCSI bus reset occurred.
1239 adw_handle_bus_reset(adw, /*initiated*/TRUE);
1242 printf("adw_intr: unknown async code 0x%x\n",
1249 * Run down the RequestQ.
1251 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
1252 struct adw_carrier *free_carrier;
1257 printf("0x%x, 0x%x, 0x%x, 0x%x\n",
1258 adw->responseq->carr_offset,
1259 adw->responseq->carr_ba,
1260 adw->responseq->areq_ba,
1261 adw->responseq->next_ba);
1264 * The firmware copies the adw_scsi_req_q.acb_baddr
1265 * field into the areq_ba field of the carrier.
1267 acb = acbbotov(adw, adw->responseq->areq_ba);
1270 * The least significant four bits of the next_ba
1271 * field are used as flags. Mask them out and then
1272 * advance through the list.
1274 free_carrier = adw->responseq;
1276 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
1277 free_carrier->next_ba = adw->free_carriers->carr_offset;
1278 adw->free_carriers = free_carrier;
1282 callout_stop(&acb->timer);
1283 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1284 bus_dmasync_op_t op;
1286 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1287 op = BUS_DMASYNC_POSTREAD;
1289 op = BUS_DMASYNC_POSTWRITE;
1290 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
1291 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
1292 ccb->csio.resid = acb->queue.data_cnt;
1294 ccb->csio.resid = 0;
1296 /* Common Cases inline... */
1297 if (acb->queue.host_status == QHSTA_NO_ERROR
1298 && (acb->queue.done_status == QD_NO_ERROR
1299 || acb->queue.done_status == QD_WITH_ERROR)) {
1300 ccb->csio.scsi_status = acb->queue.scsi_status;
1301 ccb->ccb_h.status = 0;
1302 switch (ccb->csio.scsi_status) {
1303 case SCSI_STATUS_OK:
1304 ccb->ccb_h.status |= CAM_REQ_CMP;
1306 case SCSI_STATUS_CHECK_COND:
1307 case SCSI_STATUS_CMD_TERMINATED:
1308 bcopy(&acb->sense_data, &ccb->csio.sense_data,
1309 ccb->csio.sense_len);
1310 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1311 ccb->csio.sense_resid = acb->queue.sense_len;
1314 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
1316 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1319 adwfreeacb(adw, acb);
1322 adwprocesserror(adw, acb);
/*
 * adwprocesserror() - translate an unusual firmware completion
 * (host_status / done_status) into a CAM status code, freeze the
 * device queue on failure, and free the ACB.  Recovery ACBs that
 * completed via reset are reported as command timeouts.
 */
1328 adwprocesserror(struct adw_softc *adw, struct acb *acb)
1333 if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
1334 ccb->ccb_h.status = CAM_REQ_ABORTED;
1337 switch (acb->queue.host_status) {
1338 case QHSTA_M_SEL_TIMEOUT:
1339 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1341 case QHSTA_M_SXFR_OFF_UFLW:
1342 case QHSTA_M_SXFR_OFF_OFLW:
1343 case QHSTA_M_DATA_OVER_RUN:
1344 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1346 case QHSTA_M_SXFR_DESELECTED:
1347 case QHSTA_M_UNEXPECTED_BUS_FREE:
1348 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
1350 case QHSTA_M_SCSI_BUS_RESET:
1351 case QHSTA_M_SCSI_BUS_RESET_UNSOL:
1352 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1354 case QHSTA_M_BUS_DEVICE_RESET:
1355 ccb->ccb_h.status = CAM_BDR_SENT;
1357 case QHSTA_M_QUEUE_ABORTED:
1358 /* BDR or Bus Reset */
1359 xpt_print_path(adw->path);
1360 printf("Saw Queue Aborted\n");
1361 ccb->ccb_h.status = adw->last_reset;
1363 case QHSTA_M_SXFR_SDMA_ERR:
1364 case QHSTA_M_SXFR_SXFR_PERR:
1365 case QHSTA_M_RDMA_PERR:
1366 ccb->ccb_h.status = CAM_UNCOR_PARITY;
1368 case QHSTA_M_WTM_TIMEOUT:
1369 case QHSTA_M_SXFR_WD_TMO:
1371 /* The SCSI bus hung in a phase */
1372 xpt_print_path(adw->path);
1373 printf("Watch Dog timer expired. Resetting bus\n");
1377 case QHSTA_M_SXFR_XFR_PH_ERR:
1378 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1380 case QHSTA_M_SXFR_UNKNOWN_ERROR:
1382 case QHSTA_M_BAD_CMPL_STATUS_IN:
1383 /* No command complete after a status message */
1384 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
1386 case QHSTA_M_AUTO_REQ_SENSE_FAIL:
1387 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1389 case QHSTA_M_INVALID_DEVICE:
1390 ccb->ccb_h.status = CAM_PATH_INVALID;
1392 case QHSTA_M_NO_AUTO_REQ_SENSE:
1394 * User didn't request sense, but we got a
1397 ccb->csio.scsi_status = acb->queue.scsi_status;
1398 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1401 panic("%s: Unhandled Host status error %x",
1402 device_get_nameunit(adw->device),
1403 acb->queue.host_status);
/* A recovery ACB completing via reset means the original I/O timed out. */
1407 if ((acb->state & ACB_RECOVERY_ACB) != 0) {
1408 if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
1409 || ccb->ccb_h.status == CAM_BDR_SENT)
1410 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1412 if (ccb->ccb_h.status != CAM_REQ_CMP) {
1413 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1414 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1416 adwfreeacb(adw, acb);
/*
 * adwtimeout() - per-ACB callout fired when a command exceeds its
 * CCB timeout.  If the ACB is still active, mark it as a recovery ACB
 * and escalate: first attempt a Bus Device Reset via an idle command;
 * if that fails (path elided) fall back to a full bus reset.
 */
1421 adwtimeout(void *arg)
1425 struct adw_softc *adw;
1426 adw_idle_cmd_status_t status;
1429 acb = (struct acb *)arg;
1431 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
1432 xpt_print_path(ccb->ccb_h.path);
1433 printf("ACB %p - timed out\n", (void *)acb);
1435 mtx_assert(&adw->lock, MA_OWNED);
1437 if ((acb->state & ACB_ACTIVE) == 0) {
1438 xpt_print_path(ccb->ccb_h.path);
1439 printf("ACB %p - timed out CCB already completed\n",
1444 acb->state |= ACB_RECOVERY_ACB;
1445 target_id = ccb->ccb_h.target_id;
1447 /* Attempt a BDR first */
1448 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
1449 ccb->ccb_h.target_id);
1450 if (status == ADW_IDLE_CMD_SUCCESS) {
1451 device_printf(adw->device,
1452 "BDR Delivered. No longer in timeout\n");
1453 adw_handle_device_reset(adw, target_id);
1456 xpt_print_path(adw->path);
1457 printf("Bus Reset Delivered. No longer in timeout\n");
/*
 * adw_handle_device_reset() - notify CAM that a Bus Device Reset was
 * delivered to the given target (AC_SENT_BDR on a wildcard-LUN path)
 * and record it as the most recent reset type.
 */
1462 adw_handle_device_reset(struct adw_softc *adw, u_int target)
1464 struct cam_path *path;
1467 error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
1468 target, CAM_LUN_WILDCARD);
1470 if (error == CAM_REQ_CMP) {
1471 xpt_async(AC_SENT_BDR, path, NULL);
1472 xpt_free_path(path);
1474 adw->last_reset = CAM_BDR_SENT;
/*
 * adw_handle_bus_reset() - for host-initiated resets, re-assert the
 * SCSI RSTOUT line for ADW_SCSI_RESET_HOLD_TIME_US to guarantee the
 * spec-minimum reset hold time (the microcode's own hold time is not
 * deterministic); then post AC_BUS_RESET and record the reset type.
 */
1478 adw_handle_bus_reset(struct adw_softc *adw, int initiated)
1482 * The microcode currently sets the SCSI Bus Reset signal
1483 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
1484 * command above. But the SCSI Bus Reset Hold Time in the
1485 * microcode is not deterministic (it may in fact be for less
1486 * than the SCSI Spec. minimum of 25 us). Therefore on return
1487 * the Adv Library sets the SCSI Bus Reset signal for
1488 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
1493 scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
1494 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
1495 DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
1496 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);
1499 * We will perform the async notification when the
1500 * SCSI Reset interrupt occurs.
1503 xpt_async(AC_BUS_RESET, adw->path, NULL);
1504 adw->last_reset = CAM_SCSI_BUS_RESET;
1506 MODULE_DEPEND(adw, cam, 1, 1, 1);