2 *********************************************************************
4 * BY : C.L. Huang (ching@tekram.com.tw)
5 * Erich Chen (erich@tekram.com.tw)
6 * Description: Device Driver for the amd53c974 PCI Bus Master
7 * SCSI Host adapter found on cards such as
8 * the Tekram DC-390(T).
9 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *********************************************************************
37 *********************************************************************
40 * REV# DATE NAME DESCRIPTION
41 * 1.00 07/02/96 CLH First release for RELEASE-2.1.0
42 * 1.01 08/20/96 CLH Update for RELEASE-2.1.5
43 * 1.02 11/06/96 CLH Fixed more than 1 LUN scanning
44 * 1.03 12/20/96 CLH Modify to support 2.2-ALPHA
45 * 1.04 12/26/97 CLH Modify to support RELEASE-2.2.5
46 * 1.05 01/01/99 ERICH CHEN Modify to support RELEASE-3.0.x (CAM)
47 *********************************************************************
50 /* #define AMD_DEBUG0 */
51 /* #define AMD_DEBUG_SCSI_PHASE */
53 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/queue.h>
57 #include <sys/kernel.h>
58 #include <sys/module.h>
60 #include <sys/mutex.h>
61 #include <sys/malloc.h>
66 #include <machine/bus.h>
67 #include <machine/resource.h>
72 #include <cam/cam_ccb.h>
73 #include <cam/cam_sim.h>
74 #include <cam/cam_xpt_sim.h>
75 #include <cam/cam_debug.h>
77 #include <cam/scsi/scsi_all.h>
78 #include <cam/scsi/scsi_message.h>
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcireg.h>
82 #include <dev/amd/amd.h>
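/*
 * pci_get_devid() packs the PCI device ID in the upper 16 bits and the
 * vendor ID in the lower 16 bits: device 0x2020 (Am53c974 "PCscsi"),
 * vendor 0x1022 (AMD).
 */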
84 #define PCI_DEVICE_ID_AMD53C974 0x20201022ul
85 #define PCI_BASE_ADDR0 0x10
87 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
88 typedef phase_handler_t *phase_handler_func_t;
90 static void amd_intr(void *vamd);
91 static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
92 static phase_handler_t amd_NopPhase;
94 static phase_handler_t amd_DataOutPhase0;
95 static phase_handler_t amd_DataInPhase0;
96 #define amd_CommandPhase0 amd_NopPhase
97 static phase_handler_t amd_StatusPhase0;
98 static phase_handler_t amd_MsgOutPhase0;
99 static phase_handler_t amd_MsgInPhase0;
100 static phase_handler_t amd_DataOutPhase1;
101 static phase_handler_t amd_DataInPhase1;
102 static phase_handler_t amd_CommandPhase1;
103 static phase_handler_t amd_StatusPhase1;
104 static phase_handler_t amd_MsgOutPhase1;
105 static phase_handler_t amd_MsgInPhase1;
107 static void amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
108 static int amdparsemsg(struct amd_softc *amd);
109 static int amdhandlemsgreject(struct amd_softc *amd);
110 static void amdconstructsdtr(struct amd_softc *amd,
111 u_int period, u_int offset);
112 static u_int amdfindclockrate(struct amd_softc *amd, u_int *period);
113 static int amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);
115 static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
116 static void amd_Disconnect(struct amd_softc *amd);
117 static void amd_Reselect(struct amd_softc *amd);
118 static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
119 static void amd_ScsiRstDetect(struct amd_softc *amd);
120 static void amd_ResetSCSIBus(struct amd_softc *amd);
121 static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
122 static void amd_InvalidCmd(struct amd_softc *amd);
124 static void amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
128 static void amd_timeout(void *arg1);
129 static void amd_reset(struct amd_softc *amd);
131 static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);
133 void amd_linkSRB(struct amd_softc *amd);
134 static int amd_init(device_t);
135 static void amd_load_defaults(struct amd_softc *amd);
136 static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
137 static int amd_EEpromInDO(struct amd_softc *amd);
138 static u_int16_t EEpromGetData1(struct amd_softc *amd);
139 static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
140 static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
141 static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
142 static void amd_ReadEEprom(struct amd_softc *amd);
144 static int amd_probe(device_t);
145 static int amd_attach(device_t);
146 static void amdcompletematch(struct amd_softc *amd, target_id_t target,
147 lun_id_t lun, u_int tag, struct srb_queue *queue,
149 static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
150 u_int period, u_int offset, u_int type);
151 static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);
153 static __inline void amd_clear_msg_state(struct amd_softc *amd);
156 amd_clear_msg_state(struct amd_softc *amd)
159 amd->msgout_index = 0;
160 amd->msgin_index = 0;
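/*
 * Each SRB owns one slot in the DMA-able sense buffer array allocated at
 * attach time; the slot is located by the SRB's tag number.
 */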
163 static __inline uint32_t
164 amd_get_sense_bufaddr(struct amd_softc *amd, struct amd_srb *pSRB)
168 offset = pSRB->TagNumber;
169 return (amd->sense_busaddr + (offset * sizeof(struct scsi_sense_data)));
172 static __inline struct scsi_sense_data *
173 amd_get_sense_buf(struct amd_softc *amd, struct amd_srb *pSRB)
177 offset = pSRB->TagNumber;
178 return (&amd->sense_buffers[offset]);
181 static __inline uint32_t
182 amd_get_sense_bufsize(struct amd_softc *amd, struct amd_srb *pSRB)
184 return (sizeof(struct scsi_sense_data));
187 /* CAM SIM entry points */
188 #define ccb_srb_ptr spriv_ptr0
189 #define ccb_amd_ptr spriv_ptr1
190 static void amd_action(struct cam_sim *sim, union ccb *ccb);
191 static void amd_poll(struct cam_sim *sim);
194 * State engine function tables indexed by SCSI phase number
196 phase_handler_func_t amd_SCSI_phase0[] = {
207 phase_handler_func_t amd_SCSI_phase1[] = {
219 * EEProm/BIOS negotiation periods
221 u_int8_t eeprom_period[] = {
233 * chip clock setting to SCSI specified sync parameter table.
235 u_int8_t tinfo_sync_period[] = {
248 static __inline struct amd_srb *
249 amdgetsrb(struct amd_softc * amd)
252 struct amd_srb * pSRB;
255 pSRB = TAILQ_FIRST(&amd->free_srbs);
257 TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
263 amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
265 struct scsi_request_sense sense_cmd;
269 if (srb->SRBFlag & AUTO_REQSENSE) {
270 sense_cmd.opcode = REQUEST_SENSE;
271 sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
272 sense_cmd.unused[0] = 0;
273 sense_cmd.unused[1] = 0;
274 sense_cmd.length = sizeof(struct scsi_sense_data);
275 sense_cmd.control = 0;
276 cdb = &sense_cmd.opcode;
277 cdb_len = sizeof(sense_cmd);
279 cdb = &srb->CmdBlock[0];
280 cdb_len = srb->ScsiCmdLen;
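/*
 * Load the CDB into the chip's SCSI FIFO; it is shifted out to the
 * target during the selection sequence or the following command phase.
 */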
282 amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
286 * Attempt to start a waiting transaction. Interrupts must be disabled
287 * upon entry to this function.
290 amdrunwaiting(struct amd_softc *amd) {
293 if (amd->last_phase != SCSI_BUS_FREE)
296 srb = TAILQ_FIRST(&amd->waiting_srbs);
300 if (amdstart(amd, srb) == 0) {
301 TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
302 TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
307 amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
311 struct amd_softc *amd;
314 srb = (struct amd_srb *)arg;
316 amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;
printf("amd%d: Unexpected error 0x%x returned from "
321 "bus_dmamap_load\n", amd->unit, error);
322 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
323 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
324 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
326 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
333 bus_dma_segment_t *end_seg;
336 end_seg = dm_segs + nseg;
338 /* Copy the segments into our SG list */
339 srb->pSGlist = &srb->SGsegment[0];
341 while (dm_segs < end_seg) {
342 sg->SGXLen = dm_segs->ds_len;
343 sg->SGXPtr = dm_segs->ds_addr;
348 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
349 op = BUS_DMASYNC_PREREAD;
351 op = BUS_DMASYNC_PREWRITE;
353 bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
358 srb->AdaptStatus = 0;
359 srb->TargetStatus = 0;
364 srb->TotalXferredLen = 0;
366 srb->SGToBeXferLen = 0;
372 * Last time we need to check if this CCB needs to
375 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
377 bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
378 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
383 ccb->ccb_h.status |= CAM_SIM_QUEUED;
385 /* XXX Need a timeout handler */
386 ccb->ccb_h.timeout_ch =
timeout(amd_timeout, (caddr_t)srb,
388 (ccb->ccb_h.timeout * hz) / 1000);
390 TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
396 amd_action(struct cam_sim * psim, union ccb * pccb)
398 struct amd_softc * amd;
401 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));
403 amd = (struct amd_softc *) cam_sim_softc(psim);
404 target_id = pccb->ccb_h.target_id;
406 switch (pccb->ccb_h.func_code) {
409 struct amd_srb * pSRB;
410 struct ccb_scsiio *pcsio;
415 * Assign an SRB and connect it with this ccb.
417 pSRB = amdgetsrb(amd);
421 pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
426 pccb->ccb_h.ccb_srb_ptr = pSRB;
427 pccb->ccb_h.ccb_amd_ptr = amd;
428 pSRB->ScsiCmdLen = pcsio->cdb_len;
429 bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
430 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
431 if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
433 * We've been given a pointer
434 * to a single buffer.
436 if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
442 bus_dmamap_load(amd->buffer_dmat,
448 if (error == EINPROGRESS) {
451 * ordering, freeze the
453 * until our mapping is
456 xpt_freeze_simq(amd->psim, 1);
457 pccb->ccb_h.status |=
462 struct bus_dma_segment seg;
464 /* Pointer to physical buffer */
466 (bus_addr_t)pcsio->data_ptr;
467 seg.ds_len = pcsio->dxfer_len;
468 amdexecutesrb(pSRB, &seg, 1, 0);
471 struct bus_dma_segment *segs;
473 if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
474 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
475 TAILQ_INSERT_HEAD(&amd->free_srbs,
477 pccb->ccb_h.status = CAM_PROVIDE_FAIL;
482 /* Just use the segments provided */
484 (struct bus_dma_segment *)pcsio->data_ptr;
485 amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
488 amdexecutesrb(pSRB, NULL, 0, 0);
493 struct ccb_pathinq *cpi = &pccb->cpi;
495 cpi->version_num = 1;
496 cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
497 cpi->target_sprt = 0;
499 cpi->hba_eng_cnt = 0;
501 cpi->max_lun = amd->max_lun; /* 7 or 0 */
502 cpi->initiator_id = amd->AdaptSCSIID;
503 cpi->bus_id = cam_sim_bus(psim);
504 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
505 strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
506 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
507 cpi->unit_number = cam_sim_unit(psim);
508 cpi->transport = XPORT_SPI;
509 cpi->transport_version = 2;
510 cpi->protocol = PROTO_SCSI;
511 cpi->protocol_version = SCSI_REV_2;
512 cpi->ccb_h.status = CAM_REQ_CMP;
517 pccb->ccb_h.status = CAM_REQ_INVALID;
525 amd_ResetSCSIBus(amd);
528 for (i = 0; i < 500; i++) {
529 DELAY(1000); /* Wait until our interrupt
533 pccb->ccb_h.status = CAM_REQ_CMP;
538 pccb->ccb_h.status = CAM_REQ_INVALID;
542 pccb->ccb_h.status = CAM_REQ_INVALID;
545 case XPT_GET_TRAN_SETTINGS:
547 struct ccb_trans_settings *cts = &pccb->cts;
548 struct amd_target_info *targ_info = &amd->tinfo[target_id];
549 struct amd_transinfo *tinfo;
551 struct ccb_trans_settings_scsi *scsi =
552 &cts->proto_specific.scsi;
553 struct ccb_trans_settings_spi *spi =
554 &cts->xport_specific.spi;
556 cts->protocol = PROTO_SCSI;
557 cts->protocol_version = SCSI_REV_2;
558 cts->transport = XPORT_SPI;
559 cts->transport_version = 2;
562 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
563 /* current transfer settings */
564 if (targ_info->disc_tag & AMD_CUR_DISCENB) {
565 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
569 if (targ_info->disc_tag & AMD_CUR_TAGENB) {
570 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
574 tinfo = &targ_info->current;
576 /* default(user) transfer settings */
577 if (targ_info->disc_tag & AMD_USR_DISCENB) {
578 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
582 if (targ_info->disc_tag & AMD_USR_TAGENB) {
583 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
587 tinfo = &targ_info->user;
589 spi->sync_period = tinfo->period;
590 spi->sync_offset = tinfo->offset;
593 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
594 spi->valid = CTS_SPI_VALID_SYNC_RATE
595 | CTS_SPI_VALID_SYNC_OFFSET
596 | CTS_SPI_VALID_BUS_WIDTH
597 | CTS_SPI_VALID_DISC;
598 scsi->valid = CTS_SCSI_VALID_TQ;
599 pccb->ccb_h.status = CAM_REQ_CMP;
603 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
604 #define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS)
605 case XPT_SET_TRAN_SETTINGS:
607 struct ccb_trans_settings *cts = &pccb->cts;
608 struct amd_target_info *targ_info;
609 u_int update_type = 0;
612 struct ccb_trans_settings_scsi *scsi =
613 &cts->proto_specific.scsi;
614 struct ccb_trans_settings_spi *spi =
615 &cts->xport_specific.spi;
616 if (IS_CURRENT_SETTINGS(cts)) {
617 update_type |= AMD_TRANS_GOAL;
618 } else if (IS_USER_SETTINGS(cts)) {
619 update_type |= AMD_TRANS_USER;
622 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
623 cts->ccb_h.status = CAM_REQ_INVALID;
628 targ_info = &amd->tinfo[target_id];
630 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
631 if (update_type & AMD_TRANS_GOAL) {
632 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
634 targ_info->disc_tag |= AMD_CUR_DISCENB;
636 targ_info->disc_tag &= ~AMD_CUR_DISCENB;
639 if (update_type & AMD_TRANS_USER) {
640 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
642 targ_info->disc_tag |= AMD_USR_DISCENB;
644 targ_info->disc_tag &= ~AMD_USR_DISCENB;
648 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
649 if (update_type & AMD_TRANS_GOAL) {
650 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
652 targ_info->disc_tag |= AMD_CUR_TAGENB;
654 targ_info->disc_tag &= ~AMD_CUR_TAGENB;
657 if (update_type & AMD_TRANS_USER) {
658 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
660 targ_info->disc_tag |= AMD_USR_TAGENB;
662 targ_info->disc_tag &= ~AMD_USR_TAGENB;
667 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
668 if (update_type & AMD_TRANS_GOAL)
669 spi->sync_offset = targ_info->goal.offset;
671 spi->sync_offset = targ_info->user.offset;
674 if (spi->sync_offset > AMD_MAX_SYNC_OFFSET)
675 spi->sync_offset = AMD_MAX_SYNC_OFFSET;
677 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
678 if (update_type & AMD_TRANS_GOAL)
679 spi->sync_period = targ_info->goal.period;
681 spi->sync_period = targ_info->user.period;
684 last_entry = sizeof(tinfo_sync_period) - 1;
685 if ((spi->sync_period != 0)
686 && (spi->sync_period < tinfo_sync_period[0]))
687 spi->sync_period = tinfo_sync_period[0];
688 if (spi->sync_period > tinfo_sync_period[last_entry])
689 spi->sync_period = 0;
690 if (spi->sync_offset == 0)
691 spi->sync_period = 0;
693 if ((update_type & AMD_TRANS_USER) != 0) {
694 targ_info->user.period = spi->sync_period;
695 targ_info->user.offset = spi->sync_offset;
697 if ((update_type & AMD_TRANS_GOAL) != 0) {
698 targ_info->goal.period = spi->sync_period;
699 targ_info->goal.offset = spi->sync_offset;
702 pccb->ccb_h.status = CAM_REQ_CMP;
706 case XPT_CALC_GEOMETRY:
710 extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;
711 cam_calc_geometry(&pccb->ccg, extended);
716 pccb->ccb_h.status = CAM_REQ_INVALID;
723 amd_poll(struct cam_sim * psim)
725 amd_intr(cam_sim_softc(psim));
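/*
 * Convert an offset into the current scatter/gather segment back into a
 * kernel virtual address within the CCB's data buffer by skipping over
 * the segments that have already been transferred.
 */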
729 phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
732 struct ccb_scsiio *pcsio;
734 struct amd_sg * pseg;
737 pcsio = &pSRB->pccb->csio;
739 dataPtr = (intptr_t) pcsio->data_ptr;
740 pseg = pSRB->SGsegment;
741 for (i = 0; i < pSRB->SGIndex; i++) {
742 dataPtr += (int) pseg->SGXLen;
745 dataPtr += (int) xferCnt;
746 return ((u_int8_t *) dataPtr);
750 amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
754 baddr = (bus_addr_t *)arg;
755 *baddr = segs->ds_addr;
759 ResetDevParam(struct amd_softc * amd)
763 for (target = 0; target <= amd->max_id; target++) {
764 if (amd->AdaptSCSIID != target) {
765 amdsetsync(amd, target, /*clockrate*/0,
766 /*period*/0, /*offset*/0, AMD_TRANS_CUR);
772 amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
773 u_int tag, struct srb_queue *queue, cam_status status)
776 struct amd_srb *next_srb;
778 for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
781 next_srb = TAILQ_NEXT(srb, links);
782 if (srb->pccb->ccb_h.target_id != target
783 && target != CAM_TARGET_WILDCARD)
786 if (srb->pccb->ccb_h.target_lun != lun
787 && lun != CAM_LUN_WILDCARD)
790 if (srb->TagNumber != tag
791 && tag != AMD_TAG_WILDCARD)
795 TAILQ_REMOVE(queue, srb, links);
796 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
797 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
798 && (status & CAM_DEV_QFRZN) != 0)
799 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
800 ccb->ccb_h.status = status;
807 amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
808 u_int period, u_int offset, u_int type)
810 struct amd_target_info *tinfo;
814 tinfo = &amd->tinfo[target];
815 old_period = tinfo->current.period;
816 old_offset = tinfo->current.offset;
817 if ((type & AMD_TRANS_CUR) != 0
818 && (old_period != period || old_offset != offset)) {
819 struct cam_path *path;
821 tinfo->current.period = period;
822 tinfo->current.offset = offset;
823 tinfo->sync_period_reg = clockrate;
824 tinfo->sync_offset_reg = offset;
825 tinfo->CtrlR3 &= ~FAST_SCSI;
826 tinfo->CtrlR4 &= ~EATER_25NS;
828 tinfo->CtrlR4 |= EATER_25NS;
830 tinfo->CtrlR3 |= FAST_SCSI;
832 if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
833 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
834 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
835 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
836 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
838 /* If possible, update the XPT's notion of our transfer rate */
839 if (xpt_create_path(&path, /*periph*/NULL,
840 cam_sim_path(amd->psim), target,
841 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
842 struct ccb_trans_settings neg;
843 struct ccb_trans_settings_spi *spi =
844 &neg.xport_specific.spi;
memset(&neg, 0, sizeof (neg));
xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
847 spi->sync_period = period;
848 spi->sync_offset = offset;
849 spi->valid = CTS_SPI_VALID_SYNC_RATE
850 | CTS_SPI_VALID_SYNC_OFFSET;
851 xpt_async(AC_TRANSFER_NEG, path, &neg);
855 if ((type & AMD_TRANS_GOAL) != 0) {
856 tinfo->goal.period = period;
857 tinfo->goal.offset = offset;
860 if ((type & AMD_TRANS_USER) != 0) {
861 tinfo->user.period = period;
862 tinfo->user.offset = offset;
867 amdsettags(struct amd_softc *amd, u_int target, int tagenb)
869 panic("Implement me!\n");
875 **********************************************************************
876 * Function : amd_reset (struct amd_softc * amd)
* Purpose : perform a hard reset on the SCSI bus (and the AMD chip).
* Inputs  : amd - the adapter to reset
879 **********************************************************************
882 amd_reset(struct amd_softc * amd)
890 printf("DC390: RESET");
894 bval = amd_read8(amd, CNTLREG1);
895 bval |= DIS_INT_ON_SCSI_RST;
896 amd_write8(amd, CNTLREG1, bval); /* disable interrupt */
897 amd_ResetSCSIBus(amd);
899 for (i = 0; i < 500; i++) {
903 bval = amd_read8(amd, CNTLREG1);
904 bval &= ~DIS_INT_ON_SCSI_RST;
905 amd_write8(amd, CNTLREG1, bval); /* re-enable interrupt */
907 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
908 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
911 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
912 AMD_TAG_WILDCARD, &amd->running_srbs,
913 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
914 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
915 AMD_TAG_WILDCARD, &amd->waiting_srbs,
916 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
917 amd->active_srb = NULL;
924 amd_timeout(void *arg1)
926 struct amd_srb * pSRB;
928 pSRB = (struct amd_srb *) arg1;
933 amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
936 struct ccb_scsiio *pcsio;
937 struct amd_target_info *targ_info;
945 target = pccb->ccb_h.target_id;
946 lun = pccb->ccb_h.target_lun;
947 targ_info = &amd->tinfo[target];
949 amd_clear_msg_state(amd);
950 amd_write8(amd, SCSIDESTIDREG, target);
951 amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
952 amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
953 amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
954 amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
955 amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
956 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
958 identify_msg = MSG_IDENTIFYFLAG | lun;
959 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
960 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
961 && (pSRB->CmdBlock[0] != REQUEST_SENSE)
962 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
963 identify_msg |= MSG_IDENTIFY_DISCFLAG;
965 amd_write8(amd, SCSIFIFOREG, identify_msg);
966 if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
967 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
968 pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
969 if (targ_info->current.period != targ_info->goal.period
970 || targ_info->current.offset != targ_info->goal.offset) {
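/*
 * Sync parameters still need to be negotiated: use "select with ATN
 * and stop", which halts after the identify byte with ATN still
 * asserted so the SDTR built below goes out in message-out phase.
 * The CDB is loaded later, once negotiation has been handled.
 */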
971 command = SEL_W_ATN_STOP;
972 amdconstructsdtr(amd, targ_info->goal.period,
973 targ_info->goal.offset);
974 } else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
975 command = SEL_W_ATN2;
976 pSRB->SRBState = SRB_START;
977 amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
978 amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
981 pSRB->SRBState = SRB_START;
983 if (command != SEL_W_ATN_STOP)
984 amdsetupcommand(amd, pSRB);
986 if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
987 pSRB->SRBState = SRB_READY;
990 amd->last_phase = SCSI_ARBITRATING;
991 amd_write8(amd, SCSICMDREG, command);
992 amd->active_srb = pSRB;
993 amd->cur_target = target;
1000 * Catch an interrupt from the adapter.
1001 * Process pending device interrupts.
1006 struct amd_softc *amd;
1007 struct amd_srb *pSRB;
1008 u_int internstat = 0;
1012 amd = (struct amd_softc *)arg;
printf("amd_intr: NULL softc, returning......");
1021 scsistat = amd_read8(amd, SCSISTATREG);
1022 if (!(scsistat & INTERRUPT)) {
printf("amd_intr: no interrupt pending, returning......");
1028 #ifdef AMD_DEBUG_SCSI_PHASE
1029 printf("scsistat=%2x,", scsistat);
1032 internstat = amd_read8(amd, INTERNSTATREG);
1033 intstat = amd_read8(amd, INTSTATREG);
1035 #ifdef AMD_DEBUG_SCSI_PHASE
1036 printf("intstat=%2x,", intstat);
1039 if (intstat & DISCONNECTED) {
1040 amd_Disconnect(amd);
1043 if (intstat & RESELECTED) {
1047 if (intstat & INVALID_CMD) {
1048 amd_InvalidCmd(amd);
1051 if (intstat & SCSI_RESET_) {
1052 amd_ScsiRstDetect(amd);
1055 if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
1056 pSRB = amd->active_srb;
1058 * Run our state engine. First perform
1059 * post processing for the last phase we
1060 * were in, followed by any processing
1061 * required to handle the current phase.
1064 amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
1065 amd->last_phase = scsistat & SCSI_PHASE_MASK;
1066 (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
1071 amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1073 struct amd_sg *psgl;
1074 u_int32_t ResidCnt, xferCnt;
1076 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1077 if (scsistat & PARITY_ERR) {
1078 pSRB->SRBStatus |= PARITY_ERROR;
1080 if (scsistat & COUNT_2_ZERO) {
1081 while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
1083 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1085 if (pSRB->SGIndex < pSRB->SGcount) {
1087 psgl = pSRB->pSGlist;
1088 pSRB->SGPhysAddr = psgl->SGXPtr;
1089 pSRB->SGToBeXferLen = psgl->SGXLen;
1091 pSRB->SGToBeXferLen = 0;
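/*
 * The transfer counter did not reach zero: the target changed phase
 * mid-segment.  Whatever remains in the 24-bit counter plus any bytes
 * still sitting in the SCSI FIFO was never sent, so back it out of
 * this segment.
 */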
1094 ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
1095 ResidCnt += amd_read8(amd, CTCREG_LOW)
1096 | (amd_read8(amd, CTCREG_MID) << 8)
1097 | (amd_read8(amd, CURTXTCNTREG) << 16);
1099 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1100 pSRB->SGPhysAddr += xferCnt;
1101 pSRB->TotalXferredLen += xferCnt;
1102 pSRB->SGToBeXferLen = ResidCnt;
1105 amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
1110 amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1113 u_int16_t i, residual;
1114 struct amd_sg *psgl;
1115 u_int32_t ResidCnt, xferCnt;
1118 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1119 if (scsistat & PARITY_ERR) {
1120 pSRB->SRBStatus |= PARITY_ERROR;
1122 if (scsistat & COUNT_2_ZERO) {
1124 bval = amd_read8(amd, DMA_Status);
1125 if ((bval & DMA_XFER_DONE) != 0)
1128 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1130 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1132 if (pSRB->SGIndex < pSRB->SGcount) {
1134 psgl = pSRB->pSGlist;
1135 pSRB->SGPhysAddr = psgl->SGXPtr;
1136 pSRB->SGToBeXferLen = psgl->SGXLen;
1138 pSRB->SGToBeXferLen = 0;
1140 } else { /* phase changed */
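/*
 * Data-in ended before the counter expired.  Wait for the SCSI FIFO
 * to drain, then issue DMA "blast" so bytes still buffered in the DMA
 * engine reach memory before the residual count is read.
 */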
1142 bval = amd_read8(amd, CURRENTFIFOREG);
1143 while (bval & 0x1f) {
1144 if ((bval & 0x1f) == 1) {
1145 for (i = 0; i < 0x100; i++) {
1146 bval = amd_read8(amd, CURRENTFIFOREG);
1147 if (!(bval & 0x1f)) {
1149 } else if (i == 0x0ff) {
1155 bval = amd_read8(amd, CURRENTFIFOREG);
1159 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
1160 for (i = 0; i < 0x8000; i++) {
1161 if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
1164 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1166 ResidCnt = amd_read8(amd, CTCREG_LOW)
1167 | (amd_read8(amd, CTCREG_MID) << 8)
1168 | (amd_read8(amd, CURTXTCNTREG) << 16);
1169 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1170 pSRB->SGPhysAddr += xferCnt;
1171 pSRB->TotalXferredLen += xferCnt;
1172 pSRB->SGToBeXferLen = ResidCnt;
1174 /* get residual byte */
1175 bval = amd_read8(amd, SCSIFIFOREG);
1176 ptr = phystovirt(pSRB, xferCnt);
1179 pSRB->TotalXferredLen++;
1180 pSRB->SGToBeXferLen--;
1188 amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1190 pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
1192 pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
1193 pSRB->SRBState = SRB_COMPLETED;
1194 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1199 amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1201 if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
1202 scsistat = SCSI_NOP0;
1208 amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1212 amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);
1214 done = amdparsemsg(amd);
1216 amd->msgin_index = 0;
1223 amdparsemsg(struct amd_softc *amd)
* Parse as much of the message as is available,
* rejecting it if we don't support it. When
* the entire message is available and has been
1237 * handled, return TRUE indicating that we have
1238 * parsed an entire message.
1240 switch (amd->msgin_buf[0]) {
1241 case MSG_DISCONNECT:
1242 amd->active_srb->SRBState = SRB_DISCONNECT;
1243 amd->disc_count[amd->cur_target][amd->cur_lun]++;
1246 case MSG_SIMPLE_Q_TAG:
1248 struct amd_srb *disc_srb;
1250 if (amd->msgin_index < 1)
1252 disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
1253 if (amd->active_srb != NULL
1254 || disc_srb->SRBState != SRB_DISCONNECT
1255 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
1256 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
1257 printf("amd%d: Unexpected tagged reselection "
1258 "for target %d, Issuing Abort\n", amd->unit,
1260 amd->msgout_buf[0] = MSG_ABORT;
1261 amd->msgout_len = 1;
1265 amd->active_srb = disc_srb;
1266 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1270 case MSG_MESSAGE_REJECT:
1271 response = amdhandlemsgreject(amd);
1272 if (response == FALSE)
1273 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1285 /* Wait for enough of the message to begin validation */
1286 if (amd->msgin_index < 1)
1288 if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
1293 /* Wait for opcode */
1294 if (amd->msgin_index < 2)
1297 if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
1303 * Wait until we have both args before validating
1304 * and acting on this message.
1306 * Add one to MSG_EXT_SDTR_LEN to account for
1307 * the extended message preamble.
1309 if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
1312 period = amd->msgin_buf[3];
1313 saved_offset = offset = amd->msgin_buf[4];
1314 clockrate = amdfindclockrate(amd, &period);
1315 if (offset > AMD_MAX_SYNC_OFFSET)
1316 offset = AMD_MAX_SYNC_OFFSET;
1317 if (period == 0 || offset == 0) {
1322 amdsetsync(amd, amd->cur_target, clockrate, period, offset,
1323 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1326 * See if we initiated Sync Negotiation
1327 * and didn't have to fall down to async
1330 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
1332 if (saved_offset != offset) {
1333 /* Went too low - force async */
1338 * Send our own SDTR in reply
1341 printf("Sending SDTR!\n");
1342 amd->msgout_index = 0;
1343 amd->msgout_len = 0;
1344 amdconstructsdtr(amd, period, offset);
1345 amd->msgout_index = 0;
1351 case MSG_SAVEDATAPOINTER:
1352 case MSG_RESTOREPOINTERS:
1353 /* XXX Implement!!! */
1362 amd->msgout_index = 0;
1363 amd->msgout_len = 1;
1364 amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
1370 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1372 if (done && !response)
1373 /* Clear the outgoing message buffer */
1374 amd->msgout_len = 0;
1377 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1383 amdfindclockrate(struct amd_softc *amd, u_int *period)
1388 for (i = 0; i < sizeof(tinfo_sync_period); i++) {
1389 u_int8_t *table_entry;
1391 table_entry = &tinfo_sync_period[i];
1392 if (*period <= *table_entry) {
1394 * When responding to a target that requests
1395 * sync, the requested rate may fall between
1396 * two rates that we can output, but still be
1397 * a rate that we can receive. Because of this,
1398 * we want to respond to the target with
1399 * the same rate that it sent to us even
1400 * if the period we use to send data to it
1401 * is lower. Only lower the response period
1405 *period = *table_entry;
1411 if (i == sizeof(tinfo_sync_period)) {
/* Too slow for us. Use async transfers. */
1422 * See if we sent a particular extended message to the target.
1423 * If "full" is true, the target saw the full message.
1424 * If "full" is false, the target saw at least the first
1425 * byte of the message.
1428 amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
1436 while (index < amd->msgout_len) {
1437 if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
1438 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
1440 else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
1441 && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
1442 /* Skip tag type and tag id */
1444 } else if (amd->msgout_buf[index] == MSG_EXTENDED) {
1445 /* Found a candidate */
1446 if (amd->msgout_buf[index+2] == msgtype) {
1449 end_index = index + 1
1450 + amd->msgout_buf[index + 1];
1452 if (amd->msgout_index > end_index)
1454 } else if (amd->msgout_index > index)
1459 panic("amdsentmsg: Inconsistent msg buffer");
1466 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
1468 amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
1469 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
1470 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
1471 amd->msgout_buf[amd->msgout_index++] = period;
1472 amd->msgout_buf[amd->msgout_index++] = offset;
1473 amd->msgout_len += 5;
1477 amdhandlemsgreject(struct amd_softc *amd)
1480 * If we had an outstanding SDTR for this
1481 * target, this is a signal that the target
1482 * is refusing negotiation. Also watch out
1483 * for rejected tag messages.
1485 struct amd_srb *srb;
1486 struct amd_target_info *targ_info;
1487 int response = FALSE;
1489 srb = amd->active_srb;
1490 targ_info = &amd->tinfo[amd->cur_target];
1491 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
1492 /* note asynch xfers and clear flag */
1493 amdsetsync(amd, amd->cur_target, /*clockrate*/0,
1494 /*period*/0, /*offset*/0,
1495 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1496 printf("amd%d:%d: refuses synchronous negotiation. "
1497 "Using asynchronous transfers\n",
1498 amd->unit, amd->cur_target);
1499 } else if ((srb != NULL)
1500 && (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1501 struct ccb_trans_settings neg;
1502 struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi;
1504 printf("amd%d:%d: refuses tagged commands. Performing "
1505 "non-tagged I/O\n", amd->unit, amd->cur_target);
1507 amdsettags(amd, amd->cur_target, FALSE);
1508 memset(&neg, 0, sizeof (neg));
1509 scsi->valid = CTS_SCSI_VALID_TQ;
1510 xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
1511 xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);
1514 * Resend the identify for this CCB as the target
1515 * may believe that the selection is invalid otherwise.
1517 if (amd->msgout_len != 0)
1518 bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
1520 amd->msgout_buf[0] = MSG_IDENTIFYFLAG
1521 | srb->pccb->ccb_h.target_lun;
1523 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
1524 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1525 amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;
1527 srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
1530 * Requeue all tagged commands for this target
* currently in our possession so they can be
1532 * converted to untagged commands.
1534 amdcompletematch(amd, amd->cur_target, amd->cur_lun,
1535 AMD_TAG_WILDCARD, &amd->waiting_srbs,
1536 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
1539 * Otherwise, we ignore it.
1541 printf("amd%d:%d: Message reject received -- ignored\n",
1542 amd->unit, amd->cur_target);
1548 if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
1549 if (bval == MSG_DISCONNECT) {
1550 pSRB->SRBState = SRB_DISCONNECT;
1551 } else if (bval == MSG_SAVEDATAPOINTER) {
1553 } else if ((bval == MSG_EXTENDED)
1554 || ((bval >= MSG_SIMPLE_Q_TAG)
1555 && (bval <= MSG_ORDERED_Q_TAG))) {
1556 pSRB->SRBState |= SRB_MSGIN_MULTI;
1557 pSRB->MsgInBuf[0] = bval;
1559 pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
1560 } else if (bval == MSG_MESSAGE_REJECT) {
1561 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1563 if (pSRB->SRBState & DO_SYNC_NEGO) {
1566 } else if (bval == MSG_RESTOREPOINTERS) {
1571 } else { /* minx: */
1572 *pSRB->pMsgPtr = bval;
1575 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
1576 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
1577 if (pSRB->MsgCnt == 2) {
1579 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
1581 pSRB = amd->pTmpSRB;
1582 pSRB->SRBState = SRB_UNEXPECT_RESEL;
1583 pDCB->pActiveSRB = pSRB;
1584 pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
1585 EnableMsgOut2(amd, pSRB);
1587 if (pDCB->DCBFlag & ABORT_DEV_) {
1588 pSRB->SRBState = SRB_ABORT_SENT;
1589 EnableMsgOut1(amd, pSRB);
1591 pDCB->pActiveSRB = pSRB;
1592 pSRB->SRBState = SRB_DATA_XFER;
1595 } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
1596 && (pSRB->MsgCnt == 5)) {
1597 pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
1598 if ((pSRB->MsgInBuf[1] != 3)
1599 || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */
1601 pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
1602 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1603 } else if (!(pSRB->MsgInBuf[3])
1604 || !(pSRB->MsgInBuf[4])) {
1605 set_async: /* set async */
1607 pDCB = pSRB->pSRBDCB;
1608 /* disable sync & sync nego */
1609 pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
1610 pDCB->SyncPeriod = 0;
1611 pDCB->SyncOffset = 0;
1613 pDCB->tinfo.goal.period = 0;
1614 pDCB->tinfo.goal.offset = 0;
1616 pDCB->tinfo.current.period = 0;
1617 pDCB->tinfo.current.offset = 0;
1618 pDCB->tinfo.current.width =
1619 MSG_EXT_WDTR_BUS_8_BIT;
1621 pDCB->CtrlR3 = FAST_CLK; /* non_fast */
1622 pDCB->CtrlR4 &= 0x3f;
1623 pDCB->CtrlR4 |= EATER_25NS;
1625 } else {/* set sync */
1627 pDCB = pSRB->pSRBDCB;
1628 /* enable sync & sync nego */
1629 pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;
1631 /* set sync offset */
1632 pDCB->SyncOffset &= 0x0f0;
1633 pDCB->SyncOffset |= pSRB->MsgInBuf[4];
1635 /* set sync period */
1636 pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];
1638 wval = (u_int16_t) pSRB->MsgInBuf[3];
1642 if ((wval1 * 25) != wval) {
1645 bval = FAST_CLK|FAST_SCSI;
1646 pDCB->CtrlR4 &= 0x3f;
1651 pDCB->CtrlR4 |= EATER_25NS;
1653 pDCB->CtrlR3 = bval;
1654 pDCB->SyncPeriod = (u_int8_t) wval1;
1656 pDCB->tinfo.goal.period =
1657 tinfo_sync_period[pDCB->SyncPeriod - 4];
1658 pDCB->tinfo.goal.offset = pDCB->SyncOffset;
1659 pDCB->tinfo.current.period =
tinfo_sync_period[pDCB->SyncPeriod - 4];
1661 pDCB->tinfo.current.offset = pDCB->SyncOffset;
1664 * program SCSI control register
1667 amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
1668 amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
1669 amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
1670 amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
1675 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1681 amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1683 DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
1688 amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1690 DataIO_Comm(amd, pSRB, READ_DIRECTION);
1695 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
1697 struct amd_sg * psgl;
1700 if (pSRB->SGIndex < pSRB->SGcount) {
1701 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */
1703 if (!pSRB->SGToBeXferLen) {
1704 psgl = pSRB->pSGlist;
1705 pSRB->SGPhysAddr = psgl->SGXPtr;
1706 pSRB->SGToBeXferLen = psgl->SGXLen;
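/*
 * Program the 24-bit SCSI transfer counter and the DMA engine with the
 * length and bus address of the current scatter/gather segment.
 */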
1708 lval = pSRB->SGToBeXferLen;
1709 amd_write8(amd, CTCREG_LOW, lval);
1710 amd_write8(amd, CTCREG_MID, lval >> 8);
1711 amd_write8(amd, CURTXTCNTREG, lval >> 16);
1713 amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);
1715 amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);
1717 pSRB->SRBState = SRB_DATA_XFER;
1719 amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);
1721 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */
1723 amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
1724 } else { /* xfer pad */
1725 if (pSRB->SGcount) {
1726 pSRB->AdaptStatus = H_OVER_UNDER_RUN;
1727 pSRB->SRBStatus |= OVER_RUN;
1729 amd_write8(amd, CTCREG_LOW, 0);
1730 amd_write8(amd, CTCREG_MID, 0);
1731 amd_write8(amd, CURTXTCNTREG, 0);
1733 pSRB->SRBState |= SRB_XFERPAD;
1734 amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
1739 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
1741 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1742 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1744 amdsetupcommand(amd, srb);
1746 srb->SRBState = SRB_COMMAND;
1747 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1752 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1754 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1755 pSRB->SRBState = SRB_STATUS;
1756 amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
1761 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1763 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1765 if (amd->msgout_len == 0) {
1766 amd->msgout_buf[0] = MSG_NOOP;
1767 amd->msgout_len = 1;
1769 amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
1770 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1775 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1777 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1778 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1783 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1789 amd_Disconnect(struct amd_softc * amd)
1791 struct amd_srb *srb;
1795 srb = amd->active_srb;
1796 amd->active_srb = NULL;
1797 amd->last_phase = SCSI_BUS_FREE;
1798 amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
1799 target = amd->cur_target;
1803 /* Invalid reselection */
1805 } else if (srb->SRBState & SRB_ABORT_SENT) {
1806 /* Clean up and done this srb */
while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
1809 /* XXX What about "done'ing" these srbs??? */
1810 if (pSRB->pSRBDCB == pDCB) {
1811 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1812 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1818 if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
1819 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
1820 srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
1822 } else if (srb->SRBState & SRB_DISCONNECT) {
1823 if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
1824 amd->untagged_srbs[target][lun] = srb;
1826 } else if (srb->SRBState & SRB_COMPLETED) {
1828 srb->SRBState = SRB_FREE;
1836 amd_Reselect(struct amd_softc *amd)
1838 struct amd_target_info *tinfo;
1839 u_int16_t disc_count;
1841 amd_clear_msg_state(amd);
1842 if (amd->active_srb != NULL) {
1843 /* Requeue the SRB for our attempted Selection */
1844 TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
1845 TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
1846 amd->active_srb = NULL;
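/*
 * The FIFO now holds the reselection ID byte (our own ID bit plus the
 * reselecting target's bit) followed by the identify message.  Strip
 * our bit and convert what is left into a target number.
 */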
1849 amd->cur_target = amd_read8(amd, SCSIFIFOREG);
1850 amd->cur_target ^= amd->HostID_Bit;
1851 amd->cur_target = ffs(amd->cur_target) - 1;
1852 amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
1853 tinfo = &amd->tinfo[amd->cur_target];
1854 amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
1855 disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
1856 if (disc_count == 0) {
1857 printf("amd%d: Unexpected reselection for target %d, "
1858 "Issuing Abort\n", amd->unit, amd->cur_target);
1859 amd->msgout_buf[0] = MSG_ABORT;
1860 amd->msgout_len = 1;
1861 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1863 if (amd->active_srb != NULL) {
1864 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1865 amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
1868 amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
1869 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
1870 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
1871 amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
1872 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
1873 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
1874 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
1875 amd->last_phase = SCSI_NOP0;
1879 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1881 u_int8_t bval, i, status;
1883 struct ccb_scsiio *pcsio;
1885 struct amd_sg *ptr2;
1889 pcsio = &pccb->csio;
1891 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
1892 ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
1894 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1895 bus_dmasync_op_t op;
1897 if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1898 op = BUS_DMASYNC_POSTREAD;
1900 op = BUS_DMASYNC_POSTWRITE;
1901 bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
1902 bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
1905 status = pSRB->TargetStatus;
1906 pccb->ccb_h.status = CAM_REQ_CMP;
1907 if (pSRB->SRBFlag & AUTO_REQSENSE) {
1908 pSRB->SRBFlag &= ~AUTO_REQSENSE;
1909 pSRB->AdaptStatus = 0;
1910 pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1912 if (status == SCSI_STATUS_CHECK_COND) {
1913 pccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1916 *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1918 pcsio->sense_resid = pcsio->sense_len
1919 - pSRB->TotalXferredLen;
1920 pSRB->TotalXferredLen = pSRB->Segment1[1];
1921 if (pSRB->TotalXferredLen) {
1923 pcsio->resid = pcsio->dxfer_len
1924 - pSRB->TotalXferredLen;
1925 /* The resid field contains valid data */
1926 /* Flush resid bytes on complete */
1928 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1930 bzero(&pcsio->sense_data, pcsio->sense_len);
1931 bcopy(amd_get_sense_buf(amd, pSRB), &pcsio->sense_data,
1933 pccb->ccb_h.status = CAM_AUTOSNS_VALID;
1937 if (status == SCSI_STATUS_CHECK_COND) {
1939 if ((pSRB->SGIndex < pSRB->SGcount)
1940 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
1941 bval = pSRB->SGcount;
1942 swlval = pSRB->SGToBeXferLen;
1943 ptr2 = pSRB->pSGlist;
1945 for (i = pSRB->SGIndex + 1; i < bval; i++) {
1946 swlval += ptr2->SGXLen;
1950 pcsio->resid = (u_int32_t) swlval;
1953 printf("XferredLen=%8x,NotYetXferLen=%8x,",
1954 pSRB->TotalXferredLen, swlval);
1957 if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
1959 printf("RequestSense..................\n");
1961 RequestSense(amd, pSRB);
1964 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1965 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1967 } else if (status == SCSI_STATUS_QUEUE_FULL) {
1968 pSRB->AdaptStatus = 0;
1969 pSRB->TargetStatus = 0;
1970 pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
1971 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1973 } else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
1974 pSRB->AdaptStatus = H_SEL_TIMEOUT;
1975 pSRB->TargetStatus = 0;
1977 pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
1978 pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1979 } else if (status == SCSI_STATUS_BUSY) {
1981 printf("DC390: target busy at %s %d\n",
1982 __FILE__, __LINE__);
1984 pcsio->scsi_status = SCSI_STATUS_BUSY;
1985 pccb->ccb_h.status = CAM_SCSI_BUSY;
1986 } else if (status == SCSI_STATUS_RESERV_CONFLICT) {
1988 printf("DC390: target reserved at %s %d\n",
1989 __FILE__, __LINE__);
1991 pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
1992 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
1994 pSRB->AdaptStatus = 0;
1996 printf("DC390: driver stuffup at %s %d\n",
1997 __FILE__, __LINE__);
1999 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2002 status = pSRB->AdaptStatus;
2003 if (status & H_OVER_UNDER_RUN) {
2004 pSRB->TargetStatus = 0;
2006 pccb->ccb_h.status = CAM_DATA_RUN_ERR;
2007 } else if (pSRB->SRBStatus & PARITY_ERROR) {
2009 printf("DC390: driver stuffup %s %d\n",
2010 __FILE__, __LINE__);
2012 /* Driver failed to perform operation */
2013 pccb->ccb_h.status = CAM_UNCOR_PARITY;
2014 } else { /* No error */
2015 pSRB->AdaptStatus = 0;
2016 pSRB->TargetStatus = 0;
2018 /* there is no error, (sense is invalid) */
2023 if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2024 /* CAM request not yet complete =>device_Q frozen */
2025 xpt_freeze_devq(pccb->ccb_h.path, 1);
2026 pccb->ccb_h.status |= CAM_DEV_QFRZN;
2028 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2029 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
2037 amd_ResetSCSIBus(struct amd_softc * amd)
2042 amd->ACBFlag |= RESET_DEV;
2043 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2044 amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2050 amd_ScsiRstDetect(struct amd_softc * amd)
2056 printf("amd_ScsiRstDetect \n");
2060 while (--wlval) { /* delay 1 sec */
2065 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2066 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2068 if (amd->ACBFlag & RESET_DEV) {
2069 amd->ACBFlag |= RESET_DONE;
2071 amd->ACBFlag |= RESET_DETECT;
2073 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2074 AMD_TAG_WILDCARD, &amd->running_srbs,
2075 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2076 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2077 AMD_TAG_WILDCARD, &amd->waiting_srbs,
2078 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2079 amd->active_srb = NULL;
2088 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2091 struct ccb_scsiio *pcsio;
2094 pcsio = &pccb->csio;
2096 pSRB->SRBFlag |= AUTO_REQSENSE;
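/*
 * Save the original CDB and transfer counts in Segment0/Segment1 so
 * SRBdone() can restore them when the automatic REQUEST SENSE completes.
 */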
2097 pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2098 pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2099 pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2100 pSRB->Segment1[1] = pSRB->TotalXferredLen;
2102 pSRB->AdaptStatus = 0;
2103 pSRB->TargetStatus = 0;
2105 pSRB->Segmentx.SGXPtr = amd_get_sense_bufaddr(amd, pSRB);
2106 pSRB->Segmentx.SGXLen = amd_get_sense_bufsize(amd, pSRB);
2108 pSRB->pSGlist = &pSRB->Segmentx;
2112 pSRB->CmdBlock[0] = REQUEST_SENSE;
2113 pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2114 pSRB->CmdBlock[2] = 0;
2115 pSRB->CmdBlock[3] = 0;
2116 pSRB->CmdBlock[4] = pcsio->sense_len;
2117 pSRB->CmdBlock[5] = 0;
2118 pSRB->ScsiCmdLen = 6;
2120 pSRB->TotalXferredLen = 0;
2121 pSRB->SGToBeXferLen = 0;
2122 if (amdstart(amd, pSRB) != 0) {
2123 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2124 TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2129 amd_InvalidCmd(struct amd_softc * amd)
2131 struct amd_srb *srb;
2133 srb = amd->active_srb;
2134 if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2135 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2139 amd_linkSRB(struct amd_softc *amd)
2142 struct amd_srb *psrb;
2145 count = amd->SRBCount;
2147 for (i = 0; i < count; i++) {
2148 psrb = (struct amd_srb *)&amd->SRB_array[i];
2149 psrb->TagNumber = i;
2152 * Create the dmamap. This is no longer optional!
2154 * XXX Since there is no detach method in this driver,
2155 * this does not get freed!
2157 if ((error = bus_dmamap_create(amd->buffer_dmat, 0,
2158 &psrb->dmamap)) != 0) {
2159 device_printf(amd->dev, "Error %d creating buffer "
2160 "dmamap!\n", error);
2163 TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
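/*
 * The board's serial EEPROM (93c46-style, 64 x 16 bits) is read by
 * bit-banging its chip-enable, clock and data lines through a
 * vendor-specific register in PCI configuration space.
 */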
2168 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2170 if (mode == ENABLE_CE) {
2175 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2176 if (mode == DISABLE_CE) {
2177 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2183 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2191 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2195 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2197 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2202 amd_EEpromInDO(struct amd_softc *amd)
2204 pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2206 pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2208 if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2214 EEpromGetData1(struct amd_softc *amd)
2221 for (i = 0; i < 16; i++) {
2223 carryFlag = amd_EEpromInDO(amd);
2230 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2237 for (i = 0; i < 9; i++) {
2238 amd_EEpromOutDI(amd, regval, carryFlag);
2239 carryFlag = (EEpromCmd & j) ? 1 : 0;
2245 amd_ReadEEprom(struct amd_softc *amd)
2252 ptr = (u_int16_t *)&amd->eepromBuf[0];
2254 for (i = 0; i < 0x40; i++) {
amd_EnDisableCE(amd, ENABLE_CE, &regval);
amd_Prepare(amd, &regval, cmd);
*ptr = EEpromGetData1(amd);
amd_EnDisableCE(amd, DISABLE_CE, &regval);
2265 amd_load_defaults(struct amd_softc *amd)
2269 bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2270 for (target = 0; target < MAX_SCSI_ID; target++)
2271 amd->eepromBuf[target << 2] =
2272 (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2273 amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2274 amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2275 amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2279 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2281 u_int16_t wval, *ptr;
2284 amd_ReadEEprom(amd);
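/*
 * Validate the contents: the 64 words must sum (mod 65536) to
 * EE_CHECKSUM, otherwise fall back to the built-in defaults.
 */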
2286 ptr = (u_int16_t *) & amd->eepromBuf[0];
2287 for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2290 if (wval != EE_CHECKSUM) {
2292 printf("amd%d: SEEPROM data unavailable. "
2293 "Using default device parameters.\n",
2295 amd_load_defaults(amd);
2300 **********************************************************************
* Function : static int amd_init (device_t dev)
* Purpose  : initialize the adapter hardware and internal structures
* Inputs   : dev - this host adapter's device
2304 **********************************************************************
2307 amd_init(device_t dev)
2309 struct amd_softc *amd = device_get_softc(dev);
2310 struct resource *iores;
2314 rid = PCI_BASE_ADDR0;
2315 iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
2316 if (iores == NULL) {
2318 printf("amd_init: bus_alloc_resource failure!\n");
2321 amd->tag = rman_get_bustag(iores);
2322 amd->bsh = rman_get_bushandle(iores);
2324 /* DMA tag for mapping buffers into device visible space. */
2325 if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2327 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2328 /*highaddr*/BUS_SPACE_MAXADDR,
2329 /*filter*/NULL, /*filterarg*/NULL,
2330 /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2331 /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2332 /*flags*/BUS_DMA_ALLOCNOW,
2333 /*lockfunc*/busdma_lock_mutex,
2335 &amd->buffer_dmat) != 0) {
2337 printf("amd_init: bus_dma_tag_create failure!\n");
2341 /* Create, allocate, and map DMA buffers for autosense data */
2342 if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2344 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2345 /*highaddr*/BUS_SPACE_MAXADDR,
2346 /*filter*/NULL, /*filterarg*/NULL,
2347 sizeof(struct scsi_sense_data) * MAX_SRB_CNT,
2349 /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2351 /*lockfunc*/busdma_lock_mutex,
2352 /*lockarg*/&Giant, &amd->sense_dmat) != 0) {
2354 device_printf(dev, "cannot create sense buffer dmat\n");
2358 if (bus_dmamem_alloc(amd->sense_dmat, (void **)&amd->sense_buffers,
2359 BUS_DMA_NOWAIT, &amd->sense_dmamap) != 0)
2362 bus_dmamap_load(amd->sense_dmat, amd->sense_dmamap,
2364 sizeof(struct scsi_sense_data) * MAX_SRB_CNT,
2365 amd_dmamap_cb, &amd->sense_busaddr, /*flags*/0);
2367 TAILQ_INIT(&amd->free_srbs);
2368 TAILQ_INIT(&amd->running_srbs);
2369 TAILQ_INIT(&amd->waiting_srbs);
2370 amd->last_phase = SCSI_BUS_FREE;
2372 amd->unit = device_get_unit(dev);
2373 amd->SRBCount = MAX_SRB_CNT;
2375 amd_load_eeprom_or_defaults(amd);
2377 if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2382 amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2383 amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2384 amd->AdaptSCSILUN = 0;
2385 /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2387 amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2389 for (i = 0; i <= amd->max_id; i++) {
2391 if (amd->AdaptSCSIID != i) {
2392 struct amd_target_info *tinfo;
2395 tinfo = &amd->tinfo[i];
2396 prom = (PEEprom)&amd->eepromBuf[i << 2];
2397 if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2398 tinfo->disc_tag |= AMD_USR_DISCENB;
2399 if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2400 tinfo->disc_tag |= AMD_USR_TAGENB;
2402 if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2403 tinfo->user.period =
2404 eeprom_period[prom->EE_SPEED];
2405 tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2407 tinfo->CtrlR1 = amd->AdaptSCSIID;
2408 if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2409 tinfo->CtrlR1 |= PARITY_ERR_REPO;
2410 tinfo->CtrlR3 = FAST_CLK;
2411 tinfo->CtrlR4 = EATER_25NS;
2412 if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2413 tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2416 amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2417 /* Conversion factor = 0 , 40MHz clock */
2418 amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2419 /* NOP cmd - clear command register */
2420 amd_write8(amd, SCSICMDREG, NOP_CMD);
2421 amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2422 amd_write8(amd, CNTLREG3, FAST_CLK);
2424 if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2425 bval |= NEGATE_REQACKDATA;
2427 amd_write8(amd, CNTLREG4, bval);
2429 /* Disable SCSI bus reset interrupt */
2430 amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2436 * attach and init a host adapter
2439 amd_attach(device_t dev)
2441 struct cam_devq *devq; /* Device Queue to use for this SIM */
2443 struct amd_softc *amd = device_get_softc(dev);
2444 int unit = device_get_unit(dev);
2447 struct resource *irqres;
2449 if (amd_init(dev)) {
2451 printf("amd_attach: amd_init failure!\n");
2455 /* Reset Pending INT */
2456 intstat = amd_read8(amd, INTSTATREG);
2458 /* After setting up the adapter, map our interrupt */
2460 irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2461 RF_SHAREABLE | RF_ACTIVE);
2462 if (irqres == NULL ||
2463 bus_setup_intr(dev, irqres, INTR_TYPE_CAM | INTR_ENTROPY,
2464 NULL, amd_intr, amd, &ih)) {
2466 printf("amd%d: unable to register interrupt handler!\n",
2472 * Now let the CAM generic SCSI layer find the SCSI devices on
* the bus.  First create the device queue for our SIM;
* MAX_START_JOB bounds the number of transactions that may
* be queued to the SIM at one time.
2477 devq = cam_simq_alloc(MAX_START_JOB);
2480 printf("amd_attach: cam_simq_alloc failure!\n");
2484 amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2485 amd, amd->unit, &Giant,
2486 1, MAX_TAGS_CMD_QUEUE, devq);
2487 if (amd->psim == NULL) {
2488 cam_simq_free(devq);
2490 printf("amd_attach: cam_sim_alloc failure!\n");
2494 if (xpt_bus_register(amd->psim, dev, 0) != CAM_SUCCESS) {
2495 cam_sim_free(amd->psim, /*free_devq*/TRUE);
2497 printf("amd_attach: xpt_bus_register failure!\n");
2501 if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2502 cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2503 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2504 xpt_bus_deregister(cam_sim_path(amd->psim));
2505 cam_sim_free(amd->psim, /* free_simq */ TRUE);
2507 printf("amd_attach: xpt_create_path failure!\n");
2515 amd_probe(device_t dev)
2517 if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2518 device_set_desc(dev,
2519 "Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2520 return BUS_PROBE_DEFAULT;
2525 static device_method_t amd_methods[] = {
2526 /* Device interface */
2527 DEVMETHOD(device_probe, amd_probe),
2528 DEVMETHOD(device_attach, amd_attach),
2532 static driver_t amd_driver = {
2533 "amd", amd_methods, sizeof(struct amd_softc)
2536 static devclass_t amd_devclass;
2537 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);
2538 MODULE_DEPEND(amd, pci, 1, 1, 1);
2539 MODULE_DEPEND(amd, cam, 1, 1, 1);