]> CyberLeo.Net >> Repos - FreeBSD/releng/8.1.git/blob - sys/dev/amd/amd.c
Copy stable/8 to releng/8.1 in preparation for 8.1-RC1.
[FreeBSD/releng/8.1.git] / sys / dev / amd / amd.c
1 /*-
2  *********************************************************************
3  *      FILE NAME  : amd.c
4  *           BY    : C.L. Huang         (ching@tekram.com.tw)
5  *                   Erich Chen     (erich@tekram.com.tw)
6  *      Description: Device Driver for the amd53c974 PCI Bus Master
7  *                   SCSI Host adapter found on cards such as
8  *                   the Tekram DC-390(T).
9  * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  *********************************************************************
33  * $FreeBSD$
34  */
35
36 /*
37  *********************************************************************
38  *      HISTORY:
39  *
40  *      REV#    DATE    NAME            DESCRIPTION
41  *      1.00  07/02/96  CLH             First release for RELEASE-2.1.0
42  *      1.01  08/20/96  CLH             Update for RELEASE-2.1.5
43  *      1.02  11/06/96  CLH             Fixed more than 1 LUN scanning
44  *      1.03  12/20/96  CLH             Modify to support 2.2-ALPHA
45  *      1.04  12/26/97  CLH             Modify to support RELEASE-2.2.5
46  *      1.05  01/01/99  ERICH CHEN      Modify to support RELEASE-3.0.x (CAM)
47  *********************************************************************
48  */
49
50 /* #define AMD_DEBUG0           */
51 /* #define AMD_DEBUG_SCSI_PHASE */
52
53 #include <sys/param.h>
54
55 #include <sys/systm.h>
56 #include <sys/queue.h>
57 #include <sys/kernel.h>
58 #include <sys/module.h>
59 #include <sys/lock.h>
60 #include <sys/mutex.h>
61 #include <sys/malloc.h>
62
63 #include <vm/vm.h>
64 #include <vm/pmap.h>
65
66 #include <machine/bus.h>
67 #include <machine/resource.h>
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70
71 #include <cam/cam.h>
72 #include <cam/cam_ccb.h>
73 #include <cam/cam_sim.h>
74 #include <cam/cam_xpt_sim.h>
75 #include <cam/cam_debug.h>
76
77 #include <cam/scsi/scsi_all.h>
78 #include <cam/scsi/scsi_message.h>
79
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcireg.h>
82 #include <dev/amd/amd.h>
83
84 #define PCI_DEVICE_ID_AMD53C974         0x20201022ul
85 #define PCI_BASE_ADDR0                  0x10
86
87 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
88 typedef phase_handler_t *phase_handler_func_t;
89
90 static void amd_intr(void *vamd);
91 static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
92 static phase_handler_t amd_NopPhase;
93
94 static phase_handler_t amd_DataOutPhase0;
95 static phase_handler_t amd_DataInPhase0;
96 #define amd_CommandPhase0 amd_NopPhase
97 static phase_handler_t amd_StatusPhase0;
98 static phase_handler_t amd_MsgOutPhase0;
99 static phase_handler_t amd_MsgInPhase0;
100 static phase_handler_t amd_DataOutPhase1;
101 static phase_handler_t amd_DataInPhase1;
102 static phase_handler_t amd_CommandPhase1;
103 static phase_handler_t amd_StatusPhase1;
104 static phase_handler_t amd_MsgOutPhase1;
105 static phase_handler_t amd_MsgInPhase1;
106
107 static void     amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
108 static int      amdparsemsg(struct amd_softc *amd);
109 static int      amdhandlemsgreject(struct amd_softc *amd);
110 static void     amdconstructsdtr(struct amd_softc *amd,
111                                  u_int period, u_int offset);
112 static u_int    amdfindclockrate(struct amd_softc *amd, u_int *period);
113 static int      amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);
114
115 static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
116 static void amd_Disconnect(struct amd_softc *amd);
117 static void amd_Reselect(struct amd_softc *amd);
118 static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
119 static void amd_ScsiRstDetect(struct amd_softc *amd);
120 static void amd_ResetSCSIBus(struct amd_softc *amd);
121 static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
122 static void amd_InvalidCmd(struct amd_softc *amd);
123
124 static void amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
125                           int error);
126
127 #if 0
128 static void amd_timeout(void *arg1);
129 static void amd_reset(struct amd_softc *amd);
130 #endif
131 static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);
132
133 void    amd_linkSRB(struct amd_softc *amd);
134 static int amd_init(device_t);
135 static void amd_load_defaults(struct amd_softc *amd);
136 static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
137 static int amd_EEpromInDO(struct amd_softc *amd);
138 static u_int16_t EEpromGetData1(struct amd_softc *amd);
139 static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
140 static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
141 static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
142 static void amd_ReadEEprom(struct amd_softc *amd);
143
144 static int amd_probe(device_t);
145 static int amd_attach(device_t);
146 static void amdcompletematch(struct amd_softc *amd, target_id_t target,
147                              lun_id_t lun, u_int tag, struct srb_queue *queue,
148                              cam_status status);
149 static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
150                        u_int period, u_int offset, u_int type);
151 static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);
152
153 static __inline void amd_clear_msg_state(struct amd_softc *amd);
154
155 static __inline void
156 amd_clear_msg_state(struct amd_softc *amd)
157 {
158         amd->msgout_len = 0;
159         amd->msgout_index = 0;
160         amd->msgin_index = 0;
161 }
162
163 static __inline uint32_t
164 amd_get_sense_bufaddr(struct amd_softc *amd, struct amd_srb *pSRB)
165 {
166         int offset;
167
168         offset = pSRB->TagNumber;
169         return (amd->sense_busaddr + (offset * sizeof(struct scsi_sense_data)));
170 }
171
172 static __inline struct scsi_sense_data *
173 amd_get_sense_buf(struct amd_softc *amd, struct amd_srb *pSRB)
174 {
175         int offset;
176
177         offset = pSRB->TagNumber;
178         return (&amd->sense_buffers[offset]);
179 }
180
181 static __inline uint32_t
182 amd_get_sense_bufsize(struct amd_softc *amd, struct amd_srb *pSRB)
183 {
184         return (sizeof(struct scsi_sense_data));
185 }
186
187 /* CAM SIM entry points */
188 #define ccb_srb_ptr spriv_ptr0
189 #define ccb_amd_ptr spriv_ptr1
190 static void     amd_action(struct cam_sim *sim, union ccb *ccb);
191 static void     amd_poll(struct cam_sim *sim);
192
193 /*
194  * State engine function tables indexed by SCSI phase number
195  */
/*
 * Pre-transfer ("phase 0") handlers, indexed by SCSI bus phase code.
 * Entries 4 and 5 are phase codes with no handler and get the no-op.
 */
phase_handler_func_t amd_SCSI_phase0[] = {
        amd_DataOutPhase0,      /* 0: DATA OUT */
        amd_DataInPhase0,       /* 1: DATA IN */
        amd_CommandPhase0,      /* 2: COMMAND (aliased to amd_NopPhase) */
        amd_StatusPhase0,       /* 3: STATUS */
        amd_NopPhase,           /* 4: unused */
        amd_NopPhase,           /* 5: unused */
        amd_MsgOutPhase0,       /* 6: MESSAGE OUT */
        amd_MsgInPhase0         /* 7: MESSAGE IN */
};
206
/*
 * Transfer ("phase 1") handlers, indexed by SCSI bus phase code.
 * Entries 4 and 5 are phase codes with no handler and get the no-op.
 */
phase_handler_func_t amd_SCSI_phase1[] = {
        amd_DataOutPhase1,      /* 0: DATA OUT */
        amd_DataInPhase1,       /* 1: DATA IN */
        amd_CommandPhase1,      /* 2: COMMAND */
        amd_StatusPhase1,       /* 3: STATUS */
        amd_NopPhase,           /* 4: unused */
        amd_NopPhase,           /* 5: unused */
        amd_MsgOutPhase1,       /* 6: MESSAGE OUT */
        amd_MsgInPhase1         /* 7: MESSAGE IN */
};
217
218 /*
219  * EEProm/BIOS negotiation periods
220  */
/*
 * Indexed by the EEPROM/BIOS speed code for a target.  Per the
 * per-entry comments, value * 4ns gives the nominal sync period
 * (e.g. 25 * 4ns = 100ns = 10.0MHz) — presumably chip clock units;
 * verify against the Am53C974 data sheet.
 */
u_int8_t   eeprom_period[] = {
         25,    /* 10.0MHz */
         32,    /*  8.0MHz */
         38,    /*  6.6MHz */
         44,    /*  5.7MHz */
         50,    /*  5.0MHz */
         63,    /*  4.0MHz */
         83,    /*  3.0MHz */
        125     /*  2.0MHz */
};
231
232 /*
233  * chip clock setting to SCSI specified sync parameter table.
234  */
/*
 * Same encoding as eeprom_period (entry comments give the rate in
 * MHz), with a finer step; the last entry is used as the slowest
 * supported sync rate when clamping negotiated periods.
 */
u_int8_t tinfo_sync_period[] = {
        25,     /* 10.0 */
        32,     /* 8.0 */
        38,     /* 6.6 */
        44,     /* 5.7 */
        50,     /* 5.0 */
        57,     /* 4.4 */
        63,     /* 4.0 */
        70,     /* 3.6 */
        76,     /* 3.3 */
        83      /* 3.0 */
};
247
248 static __inline struct amd_srb *
249 amdgetsrb(struct amd_softc * amd)
250 {
251         int     intflag;
252         struct amd_srb *    pSRB;
253
254         intflag = splcam();
255         pSRB = TAILQ_FIRST(&amd->free_srbs);
256         if (pSRB)
257                 TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
258         splx(intflag);
259         return (pSRB);
260 }
261
262 static void
263 amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
264 {
265         struct scsi_request_sense sense_cmd;
266         u_int8_t *cdb;
267         u_int cdb_len;
268
269         if (srb->SRBFlag & AUTO_REQSENSE) {
270                 sense_cmd.opcode = REQUEST_SENSE;
271                 sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
272                 sense_cmd.unused[0] = 0;
273                 sense_cmd.unused[1] = 0;
274                 sense_cmd.length = sizeof(struct scsi_sense_data);
275                 sense_cmd.control = 0;
276                 cdb = &sense_cmd.opcode;
277                 cdb_len = sizeof(sense_cmd);
278         } else {
279                 cdb = &srb->CmdBlock[0];
280                 cdb_len = srb->ScsiCmdLen;
281         }
282         amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
283 }
284
285 /*
286  * Attempt to start a waiting transaction.  Interrupts must be disabled
287  * upon entry to this function.
288  */
289 static void
290 amdrunwaiting(struct amd_softc *amd) {
291         struct amd_srb *srb;
292
293         if (amd->last_phase != SCSI_BUS_FREE)
294                 return;
295
296         srb = TAILQ_FIRST(&amd->waiting_srbs);
297         if (srb == NULL)
298                 return;
299         
300         if (amdstart(amd, srb) == 0) {
301                 TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
302                 TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
303         }
304 }
305
306 static void
307 amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
308 {
309         struct   amd_srb *srb;
310         union    ccb *ccb;
311         struct   amd_softc *amd;
312         int      s;
313
314         srb = (struct amd_srb *)arg;
315         ccb = srb->pccb;
316         amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;
317
318         if (error != 0) {
319                 if (error != EFBIG)
320                         printf("amd%d: Unexepected error 0x%x returned from "
321                                "bus_dmamap_load\n", amd->unit, error);
322                 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
323                         xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
324                         ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
325                 }
326                 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
327                 xpt_done(ccb);
328                 return;
329         }
330
331         if (nseg != 0) {
332                 struct amd_sg *sg;
333                 bus_dma_segment_t *end_seg;
334                 bus_dmasync_op_t op;
335
336                 end_seg = dm_segs + nseg;
337
338                 /* Copy the segments into our SG list */
339                 srb->pSGlist = &srb->SGsegment[0];
340                 sg = srb->pSGlist;
341                 while (dm_segs < end_seg) {
342                         sg->SGXLen = dm_segs->ds_len;
343                         sg->SGXPtr = dm_segs->ds_addr;
344                         sg++;
345                         dm_segs++;
346                 }
347
348                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
349                         op = BUS_DMASYNC_PREREAD;
350                 else
351                         op = BUS_DMASYNC_PREWRITE;
352
353                 bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
354
355         }
356         srb->SGcount = nseg;
357         srb->SGIndex = 0;
358         srb->AdaptStatus = 0;
359         srb->TargetStatus = 0;
360         srb->MsgCnt = 0;
361         srb->SRBStatus = 0;
362         srb->SRBFlag = 0;
363         srb->SRBState = 0;
364         srb->TotalXferredLen = 0;
365         srb->SGPhysAddr = 0;
366         srb->SGToBeXferLen = 0;
367         srb->EndMessage = 0;
368
369         s = splcam();
370
371         /*
372          * Last time we need to check if this CCB needs to
373          * be aborted.
374          */
375         if (ccb->ccb_h.status != CAM_REQ_INPROG) {
376                 if (nseg != 0)
377                         bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
378                 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
379                 xpt_done(ccb);
380                 splx(s);
381                 return;
382         }
383         ccb->ccb_h.status |= CAM_SIM_QUEUED;
384 #if 0
385         /* XXX Need a timeout handler */
386         ccb->ccb_h.timeout_ch =
387             timeout(amdtimeout, (caddr_t)srb,
388                     (ccb->ccb_h.timeout * hz) / 1000);
389 #endif
390         TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
391         amdrunwaiting(amd);
392         splx(s);
393 }
394
395 static void
396 amd_action(struct cam_sim * psim, union ccb * pccb)
397 {
398         struct amd_softc *    amd;
399         u_int   target_id;
400
401         CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));
402
403         amd = (struct amd_softc *) cam_sim_softc(psim);
404         target_id = pccb->ccb_h.target_id;
405
406         switch (pccb->ccb_h.func_code) {
407         case XPT_SCSI_IO:
408         {
409                 struct amd_srb *    pSRB;
410                 struct ccb_scsiio *pcsio;
411
412                 pcsio = &pccb->csio;
413
414                 /*
415                  * Assign an SRB and connect it with this ccb.
416                  */
417                 pSRB = amdgetsrb(amd);
418
419                 if (!pSRB) {
420                         /* Freeze SIMQ */
421                         pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
422                         xpt_done(pccb);
423                         return;
424                 }
425                 pSRB->pccb = pccb;
426                 pccb->ccb_h.ccb_srb_ptr = pSRB;
427                 pccb->ccb_h.ccb_amd_ptr = amd;
428                 pSRB->ScsiCmdLen = pcsio->cdb_len;
429                 bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
430                 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
431                         if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
432                                 /*
433                                  * We've been given a pointer
434                                  * to a single buffer.
435                                  */
436                                 if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
437                                         int s;
438                                         int error;
439
440                                         s = splsoftvm();
441                                         error =
442                                             bus_dmamap_load(amd->buffer_dmat,
443                                                             pSRB->dmamap,
444                                                             pcsio->data_ptr,
445                                                             pcsio->dxfer_len,
446                                                             amdexecutesrb,
447                                                             pSRB, /*flags*/0);
448                                         if (error == EINPROGRESS) {
449                                                 /*
450                                                  * So as to maintain
451                                                  * ordering, freeze the
452                                                  * controller queue
453                                                  * until our mapping is
454                                                  * returned.
455                                                  */
456                                                 xpt_freeze_simq(amd->psim, 1);
457                                                 pccb->ccb_h.status |=
458                                                     CAM_RELEASE_SIMQ;
459                                         }
460                                         splx(s);
461                                 } else {
462                                         struct bus_dma_segment seg;
463
464                                         /* Pointer to physical buffer */
465                                         seg.ds_addr =
466                                             (bus_addr_t)pcsio->data_ptr;
467                                         seg.ds_len = pcsio->dxfer_len;
468                                         amdexecutesrb(pSRB, &seg, 1, 0);
469                                 }
470                         } else {
471                                 struct bus_dma_segment *segs;
472
473                                 if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
474                                  || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
475                                         TAILQ_INSERT_HEAD(&amd->free_srbs,
476                                                           pSRB, links);
477                                         pccb->ccb_h.status = CAM_PROVIDE_FAIL;
478                                         xpt_done(pccb);
479                                         return;
480                                 }
481
482                                 /* Just use the segments provided */
483                                 segs =
484                                     (struct bus_dma_segment *)pcsio->data_ptr;
485                                 amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
486                         }
487                 } else
488                         amdexecutesrb(pSRB, NULL, 0, 0);
489                 break;
490         }
491         case XPT_PATH_INQ:
492         {
493                 struct ccb_pathinq *cpi = &pccb->cpi;
494
495                 cpi->version_num = 1;
496                 cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
497                 cpi->target_sprt = 0;
498                 cpi->hba_misc = 0;
499                 cpi->hba_eng_cnt = 0;
500                 cpi->max_target = 7;
501                 cpi->max_lun = amd->max_lun;    /* 7 or 0 */
502                 cpi->initiator_id = amd->AdaptSCSIID;
503                 cpi->bus_id = cam_sim_bus(psim);
504                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
505                 strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
506                 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
507                 cpi->unit_number = cam_sim_unit(psim);
508                 cpi->transport = XPORT_SPI;
509                 cpi->transport_version = 2;
510                 cpi->protocol = PROTO_SCSI;
511                 cpi->protocol_version = SCSI_REV_2;
512                 cpi->ccb_h.status = CAM_REQ_CMP;
513                 xpt_done(pccb);
514                 break;
515         }
516         case XPT_ABORT:
517                 pccb->ccb_h.status = CAM_REQ_INVALID;
518                 xpt_done(pccb);
519                 break;
520         case XPT_RESET_BUS:
521         {
522
523                 int     i;
524
525                 amd_ResetSCSIBus(amd);
526                 amd->ACBFlag = 0;
527
528                 for (i = 0; i < 500; i++) {
529                         DELAY(1000);    /* Wait until our interrupt
530                                          * handler sees it */
531                 }
532
533                 pccb->ccb_h.status = CAM_REQ_CMP;
534                 xpt_done(pccb);
535                 break;
536         }
537         case XPT_RESET_DEV:
538                 pccb->ccb_h.status = CAM_REQ_INVALID;
539                 xpt_done(pccb);
540                 break;
541         case XPT_TERM_IO:
542                 pccb->ccb_h.status = CAM_REQ_INVALID;
543                 xpt_done(pccb);
544                 break;
545         case XPT_GET_TRAN_SETTINGS:
546         {
547                 struct ccb_trans_settings *cts = &pccb->cts;
548                 struct amd_target_info *targ_info = &amd->tinfo[target_id];
549                 struct amd_transinfo *tinfo;
550                 int     intflag;
551                 struct ccb_trans_settings_scsi *scsi =
552                     &cts->proto_specific.scsi;
553                 struct ccb_trans_settings_spi *spi =
554                     &cts->xport_specific.spi;
555
556                 cts->protocol = PROTO_SCSI;
557                 cts->protocol_version = SCSI_REV_2;
558                 cts->transport = XPORT_SPI;
559                 cts->transport_version = 2;
560
561                 intflag = splcam();
562                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
563                         /* current transfer settings */
564                         if (targ_info->disc_tag & AMD_CUR_DISCENB) {
565                                 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
566                         } else {
567                                 spi->flags = 0;
568                         }
569                         if (targ_info->disc_tag & AMD_CUR_TAGENB) {
570                                 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
571                         } else {
572                                 scsi->flags = 0;
573                         }
574                         tinfo = &targ_info->current;
575                 } else {
576                         /* default(user) transfer settings */
577                         if (targ_info->disc_tag & AMD_USR_DISCENB) {
578                                 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
579                         } else {
580                                 spi->flags = 0;
581                         }
582                         if (targ_info->disc_tag & AMD_USR_TAGENB) {
583                                 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
584                         } else {
585                                 scsi->flags = 0;
586                         }
587                         tinfo = &targ_info->user;
588                 }
589                 spi->sync_period = tinfo->period;
590                 spi->sync_offset = tinfo->offset;
591                 splx(intflag);
592
593                 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
594                 spi->valid = CTS_SPI_VALID_SYNC_RATE
595                            | CTS_SPI_VALID_SYNC_OFFSET
596                            | CTS_SPI_VALID_BUS_WIDTH
597                            | CTS_SPI_VALID_DISC;
598                 scsi->valid = CTS_SCSI_VALID_TQ;
599                 pccb->ccb_h.status = CAM_REQ_CMP;
600                 xpt_done(pccb);
601                 break;
602         }
603 #define IS_CURRENT_SETTINGS(c)  (c->type == CTS_TYPE_CURRENT_SETTINGS)
604 #define IS_USER_SETTINGS(c)     (c->type == CTS_TYPE_USER_SETTINGS)
605         case XPT_SET_TRAN_SETTINGS:
606         {
607                 struct ccb_trans_settings *cts = &pccb->cts;
608                 struct amd_target_info *targ_info;
609                 u_int  update_type = 0;
610                 int    intflag;
611                 int    last_entry;
612                 struct ccb_trans_settings_scsi *scsi =
613                     &cts->proto_specific.scsi;
614                 struct ccb_trans_settings_spi *spi =
615                     &cts->xport_specific.spi;
616                 if (IS_CURRENT_SETTINGS(cts)) {
617                         update_type |= AMD_TRANS_GOAL;
618                 } else if (IS_USER_SETTINGS(cts)) {
619                         update_type |= AMD_TRANS_USER;
620                 }
621                 if (update_type == 0
622                  || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
623                         cts->ccb_h.status = CAM_REQ_INVALID;
624                         xpt_done(pccb);
625                 }
626
627                 intflag = splcam();
628                 targ_info = &amd->tinfo[target_id];
629
630                 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
631                         if (update_type & AMD_TRANS_GOAL) {
632                                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
633                                    != 0) {
634                                         targ_info->disc_tag |= AMD_CUR_DISCENB;
635                                 } else {
636                                         targ_info->disc_tag &= ~AMD_CUR_DISCENB;
637                                 }
638                         }
639                         if (update_type & AMD_TRANS_USER) {
640                                 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
641                                    != 0) {
642                                         targ_info->disc_tag |= AMD_USR_DISCENB;
643                                 } else {
644                                         targ_info->disc_tag &= ~AMD_USR_DISCENB;
645                                 }
646                         }
647                 }
648                 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
649                         if (update_type & AMD_TRANS_GOAL) {
650                                 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
651                                    != 0) {
652                                         targ_info->disc_tag |= AMD_CUR_TAGENB;
653                                 } else {
654                                         targ_info->disc_tag &= ~AMD_CUR_TAGENB;
655                                 }
656                         }
657                         if (update_type & AMD_TRANS_USER) {
658                                 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
659                                     != 0) {
660                                         targ_info->disc_tag |= AMD_USR_TAGENB;
661                                 } else {
662                                         targ_info->disc_tag &= ~AMD_USR_TAGENB;
663                                 }
664                         }
665                 }
666
667                 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
668                         if (update_type & AMD_TRANS_GOAL)
669                                 spi->sync_offset = targ_info->goal.offset;
670                         else
671                                 spi->sync_offset = targ_info->user.offset;
672                 }
673
674                 if (spi->sync_offset > AMD_MAX_SYNC_OFFSET)
675                         spi->sync_offset = AMD_MAX_SYNC_OFFSET;
676
677                 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
678                         if (update_type & AMD_TRANS_GOAL)
679                                 spi->sync_period = targ_info->goal.period;
680                         else
681                                 spi->sync_period = targ_info->user.period;
682                 }
683
684                 last_entry = sizeof(tinfo_sync_period) - 1;
685                 if ((spi->sync_period != 0)
686                  && (spi->sync_period < tinfo_sync_period[0]))
687                         spi->sync_period = tinfo_sync_period[0];
688                 if (spi->sync_period > tinfo_sync_period[last_entry])
689                         spi->sync_period = 0;
690                 if (spi->sync_offset == 0)
691                         spi->sync_period = 0;
692
693                 if ((update_type & AMD_TRANS_USER) != 0) {
694                         targ_info->user.period = spi->sync_period;
695                         targ_info->user.offset = spi->sync_offset;
696                 }
697                 if ((update_type & AMD_TRANS_GOAL) != 0) {
698                         targ_info->goal.period = spi->sync_period;
699                         targ_info->goal.offset = spi->sync_offset;
700                 }
701                 splx(intflag);
702                 pccb->ccb_h.status = CAM_REQ_CMP;
703                 xpt_done(pccb);
704                 break;
705         }
706         case XPT_CALC_GEOMETRY:
707         {
708                 int     extended;
709
710                 extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;
711                 cam_calc_geometry(&pccb->ccg, extended);
712                 xpt_done(pccb);
713                 break;
714         }
715         default:
716                 pccb->ccb_h.status = CAM_REQ_INVALID;
717                 xpt_done(pccb);
718                 break;
719         }
720 }
721
/*
 * Polled-mode entry point: service the controller by invoking the
 * interrupt handler directly.
 */
static void
amd_poll(struct cam_sim * psim)
{
	struct amd_softc *amd;

	amd = cam_sim_softc(psim);
	amd_intr(amd);
}
727
728 static u_int8_t * 
729 phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
730 {
731         intptr_t   dataPtr;
732         struct ccb_scsiio *pcsio;
733         u_int8_t   i;
734         struct amd_sg *    pseg;
735
736         dataPtr = 0;
737         pcsio = &pSRB->pccb->csio;
738
739         dataPtr = (intptr_t) pcsio->data_ptr;
740         pseg = pSRB->SGsegment;
741         for (i = 0; i < pSRB->SGIndex; i++) {
742                 dataPtr += (int) pseg->SGXLen;
743                 pseg++;
744         }
745         dataPtr += (int) xferCnt;
746         return ((u_int8_t *) dataPtr);
747 }
748
749 static void
750 amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
751 {
752         bus_addr_t *baddr;
753
754         baddr = (bus_addr_t *)arg;
755         *baddr = segs->ds_addr;
756 }
757
758 static void
759 ResetDevParam(struct amd_softc * amd)
760 {
761         u_int target;
762
763         for (target = 0; target <= amd->max_id; target++) {
764                 if (amd->AdaptSCSIID != target) {
765                         amdsetsync(amd, target, /*clockrate*/0,
766                                    /*period*/0, /*offset*/0, AMD_TRANS_CUR);
767                 }
768         }
769 }
770
771 static void
772 amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
773                  u_int tag, struct srb_queue *queue, cam_status status)
774 {
775         struct amd_srb *srb;
776         struct amd_srb *next_srb;
777
778         for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
779                 union ccb *ccb;
780
781                 next_srb = TAILQ_NEXT(srb, links);
782                 if (srb->pccb->ccb_h.target_id != target
783                  && target != CAM_TARGET_WILDCARD)
784                         continue;
785
786                 if (srb->pccb->ccb_h.target_lun != lun
787                  && lun != CAM_LUN_WILDCARD)
788                         continue;
789
790                 if (srb->TagNumber != tag
791                  && tag != AMD_TAG_WILDCARD)
792                         continue;
793                 
794                 ccb = srb->pccb;
795                 TAILQ_REMOVE(queue, srb, links);
796                 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
797                 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
798                  && (status & CAM_DEV_QFRZN) != 0)
799                         xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
800                 ccb->ccb_h.status = status;
801                 xpt_done(ccb);
802         }
803
804 }
805
806 static void
807 amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
808            u_int period, u_int offset, u_int type)
809 {
810         struct amd_target_info *tinfo;
811         u_int old_period;
812         u_int old_offset;
813
814         tinfo = &amd->tinfo[target];
815         old_period = tinfo->current.period;
816         old_offset = tinfo->current.offset;
817         if ((type & AMD_TRANS_CUR) != 0
818          && (old_period != period || old_offset != offset)) {
819                 struct cam_path *path;
820
821                 tinfo->current.period = period;
822                 tinfo->current.offset = offset;
823                 tinfo->sync_period_reg = clockrate;
824                 tinfo->sync_offset_reg = offset;
825                 tinfo->CtrlR3 &= ~FAST_SCSI;
826                 tinfo->CtrlR4 &= ~EATER_25NS;
827                 if (clockrate > 7)
828                         tinfo->CtrlR4 |= EATER_25NS;
829                 else
830                         tinfo->CtrlR3 |= FAST_SCSI;
831
832                 if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
833                         amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
834                         amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
835                         amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
836                         amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
837                 }
838                 /* If possible, update the XPT's notion of our transfer rate */
839                 if (xpt_create_path(&path, /*periph*/NULL,
840                                     cam_sim_path(amd->psim), target,
841                                     CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
842                         struct ccb_trans_settings neg;
843                         struct ccb_trans_settings_spi *spi =
844                             &neg.xport_specific.spi;
845                         xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
846                         memset(&neg, 0, sizeof (neg));
847                         spi->sync_period = period;
848                         spi->sync_offset = offset;
849                         spi->valid = CTS_SPI_VALID_SYNC_RATE
850                                   | CTS_SPI_VALID_SYNC_OFFSET;
851                         xpt_async(AC_TRANSFER_NEG, path, &neg);
852                         xpt_free_path(path);    
853                 }
854         }
855         if ((type & AMD_TRANS_GOAL) != 0) {
856                 tinfo->goal.period = period;
857                 tinfo->goal.offset = offset;
858         }
859
860         if ((type & AMD_TRANS_USER) != 0) {
861                 tinfo->user.period = period;
862                 tinfo->user.offset = offset;
863         }
864 }
865
/*
 * Per-target tagged-queueing enable/disable.  Not implemented;
 * reaching this function is a driver bug, hence the panic.
 * (Called from amdhandlemsgreject() when a target rejects a
 * tag message.)
 */
static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!\n");
}
871
872
#if 0
/*
 * NOTE(review): everything in this #if 0 region is compiled out;
 * it is retained for historical reference only.
 */
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus( and AMD chip).
 * Inputs   : cmd - command which caused the SCSI RESET
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
	int        intflag;
	u_int8_t   bval;
	u_int16_t  i;


#ifdef AMD_DEBUG0
	printf("DC390: RESET");
#endif

	intflag = splcam();
	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	/* Busy-wait ~500ms for the bus to settle after the reset. */
	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	/*
	 * Fail back every in-flight and queued request with a
	 * bus-reset status so CAM can retry them.
	 */
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
	amd->ACBFlag = 0;
	splx(intflag);
	return;
}

/* Empty timeout handler stub; never armed (dead code). */
void
amd_timeout(void *arg1)
{
	struct amd_srb *    pSRB;

	pSRB = (struct amd_srb *) arg1;
}
#endif
931
/*
 * Program the chip for the given SRB and begin target selection.
 * Loads the per-target sync/control registers, queues the
 * IDENTIFY (and optional tag) message bytes into the SCSI FIFO,
 * then issues the appropriate select-with-ATN command.  If sync
 * negotiation is pending (current != goal settings) selection
 * stops after message-out (SEL_W_ATN_STOP) so an SDTR can be
 * sent first.
 *
 * Returns 0 if selection was started (SRB becomes active_srb),
 * or 1 if the chip already had an interrupt pending, in which
 * case the SRB is left in SRB_READY for a later retry.
 */
static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	/*
	 * Permit disconnection only if it is enabled for the target,
	 * not forbidden by the CCB, and we are not doing (auto)
	 * request sense.
	 */
	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	  && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	  && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	  && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	/* Tagged commands require disconnection to be permitted. */
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	  || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		/* Sync negotiation pending: queue an SDTR and stop
		 * after message-out so it can be delivered. */
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		/* Tagged: FIFO gets tag message type + tag number. */
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		/* Chip has work pending; defer this SRB. */
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}
998
/*
 *  Catch an interrupt from the adapter.
 *  Process pending device interrupts.
 */
static void 
amd_intr(void   *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int  internstat = 0;
	u_int  scsistat;
	u_int  intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		printf("amd_intr: amd NULL return......");
#endif
		return;
	}

	/* Nothing to do unless the chip reports an interrupt. */
	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		printf("amd_intr: scsistat = NULL ,return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	printf("scsistat=%2x,", scsistat);
#endif

	/*
	 * NOTE(review): reading these registers presumably
	 * acknowledges the interrupt condition — confirm against
	 * the Am53C974 datasheet before reordering.
	 */
	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	printf("intstat=%2x,", intstat);
#endif

	/* Bus-level events take priority over phase processing. */
	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat =
		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}
1069
/*
 * Data-out phase post-processing: update S/G bookkeeping after a
 * DMA write to the target.  When the programmed count reached
 * zero, the whole segment moved and we advance to the next S/G
 * element; otherwise the residual is recomputed from the FIFO
 * level plus the 24-bit transfer counter and the current segment
 * is shrunk accordingly.  Returns scsistat unchanged.
 */
static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			/* Busy-wait for the DMA engine to finish. */
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				/* Advance to the next S/G element. */
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			/*
			 * Partial transfer: residual = bytes still in
			 * the FIFO (low 5 bits of the FIFO register)
			 * plus the 24-bit current transfer counter.
			 */
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}
1108
/*
 * Data-in phase post-processing: update S/G bookkeeping after a
 * DMA read from the target.  On a full-count transfer, advance
 * to the next S/G element.  On a phase change mid-transfer,
 * wait for the FIFO to drain, flush any stragglers with a DMA
 * blast, recompute the residual from the transfer counter, and
 * recover a possible odd trailing byte left in the SCSI FIFO.
 * Returns scsistat unchanged.
 */
static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t  i, residual;
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;
	u_int8_t *  ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			/* Busy-wait for the DMA engine to drain. */
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				/* Advance to the next S/G element. */
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
			/*
			 * Wait for the FIFO (low 5 bits = byte count)
			 * to empty.  If it sticks at exactly one byte,
			 * that byte is a residual we must fetch by
			 * hand after the blast below.
			 */
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd, CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			/* Blast remaining FIFO data out to memory,
			 * with a bounded wait for completion. */
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			/* Residual from the 24-bit transfer counter. */
			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
			if (residual) {
				/* get residual byte */ 
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}
1186
/*
 * Status phase post-processing: the SCSI FIFO holds the status
 * byte followed by the completion message byte — the two reads
 * below are order-dependent.  Marks the SRB completed, acks the
 * message, and returns SCSI_NOP0 so the phase-1 engine does no
 * further work for this interrupt.
 */
static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	/* get message */
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
1197
1198 static u_int
1199 amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1200 {
1201         if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
1202                 scsistat = SCSI_NOP0;
1203         }
1204         return (scsistat);
1205 }
1206
1207 static u_int
1208 amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1209 {
1210         int done;
1211         
1212         amd->msgin_buf[amd->msgin_index]  = amd_read8(amd, SCSIFIFOREG);
1213
1214         done = amdparsemsg(amd);
1215         if (done)
1216                 amd->msgin_index = 0;
1217         else 
1218                 amd->msgin_index++;
1219         return (SCSI_NOP0);
1220 }
1221
/*
 * Incrementally parse the message bytes accumulated in
 * msgin_buf[0..msgin_index].  Called once per byte from
 * amd_MsgInPhase0(); returns TRUE once a complete message has
 * been handled (or rejected), FALSE while more bytes are needed.
 * Unsupported messages are answered with MESSAGE REJECT.
 */
static int
amdparsemsg(struct amd_softc *amd)
{
	int	reject;
	int	done;
	int	response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	/*
	 * Parse as much of the message as is availible,
	 * rejecting it if we don't support it.  When
	 * the entire message is availible and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		/* Need the tag-number byte before acting. */
		if (amd->msgin_index < 1)
			break;		
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		/*
		 * Validate the tagged reselection: the referenced SRB
		 * must be disconnected and match the reselecting
		 * target/lun; otherwise abort the bogus reselection.
		 */
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			printf("amd%d: Unexpected tagged reselection "
			       "for target %d, Issuing Abort\n", amd->unit,
			       amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		/* SDTR is the only extended message we accept. */
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		/* Clamp the offset to what the chip supports. */
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			/* Either side wants async transfers. */
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				printf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	/* Unsupported message: queue a MESSAGE REJECT reply. */
	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	/* Raise ATN so the target enters message-out for our reply. */
	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	/* Drop Ack */
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}
1381
1382 static u_int
1383 amdfindclockrate(struct amd_softc *amd, u_int *period)
1384 {
1385         u_int i;
1386         u_int clockrate;
1387
1388         for (i = 0; i < sizeof(tinfo_sync_period); i++) {
1389                 u_int8_t *table_entry;
1390
1391                 table_entry = &tinfo_sync_period[i];
1392                 if (*period <= *table_entry) {
1393                         /*
1394                          * When responding to a target that requests
1395                          * sync, the requested rate may fall between
1396                          * two rates that we can output, but still be
1397                          * a rate that we can receive.  Because of this,
1398                          * we want to respond to the target with
1399                          * the same rate that it sent to us even
1400                          * if the period we use to send data to it
1401                          * is lower.  Only lower the response period
1402                          * if we must.
1403                          */ 
1404                         if (i == 0) {
1405                                 *period = *table_entry;
1406                         }
1407                         break;
1408                 }
1409         }
1410
1411         if (i == sizeof(tinfo_sync_period)) {
1412                 /* Too slow for us.  Use asnyc transfers. */
1413                 *period = 0;
1414                 clockrate = 0;
1415         } else
1416                 clockrate = i + 4;
1417
1418         return (clockrate);
1419 }
1420
/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	/* Walk the outgoing buffer message by message. */
	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;	/* single-byte messages */
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				/* Index of the message's last byte:
				 * header byte + declared length. */
				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}
1464
1465 static void
1466 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
1467 {
1468         amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
1469         amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
1470         amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
1471         amd->msgout_buf[amd->msgout_index++] = period;
1472         amd->msgout_buf[amd->msgout_index++] = offset;
1473         amd->msgout_len += 5;
1474 }
1475
/*
 * Handle a MESSAGE REJECT from the target.  If we had an SDTR
 * outstanding, the target is refusing sync negotiation: fall
 * back to async.  If the rejected message was a tag, disable
 * tagged queueing for the target, re-queue its tagged commands,
 * and resend the IDENTIFY so the selection stays valid.
 * Returns TRUE when a reply message was queued (caller must
 * assert ATN), FALSE otherwise.
 */
static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation.  Also watch out
	 * for rejected tag messages.
	 */
	struct	amd_srb *srb;
	struct	amd_target_info *targ_info;
	int	response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		printf("amd%d:%d: refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct	ccb_trans_settings neg;
		struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi;

		printf("amd%d:%d: refuses tagged commands.  Performing "
		       "non-tagged I/O\n", amd->unit, amd->cur_target);

		/* Tell the XPT tagged queueing is now off. */
		amdsettags(amd, amd->cur_target, FALSE);
		memset(&neg, 0, sizeof (neg));
		scsi->valid = CTS_SCSI_VALID_TQ;
		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		/* Shift any pending message bytes up to make room
		 * for the IDENTIFY at the front of the buffer. */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				    | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		  && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Requeue all tagged commands for this target
		 * currently in our posession so they can be
		 * converted to untagged commands.
		 */
		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("amd%d:%d: Message reject received -- ignored\n",
		       amd->unit, amd->cur_target);
	}
	return (response);
}
1546
1547 #if 0
1548         if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
1549                 if (bval == MSG_DISCONNECT) {
1550                         pSRB->SRBState = SRB_DISCONNECT;
1551                 } else if (bval == MSG_SAVEDATAPOINTER) {
1552                         goto min6;
1553                 } else if ((bval == MSG_EXTENDED)
1554                         || ((bval >= MSG_SIMPLE_Q_TAG)
1555                          && (bval <= MSG_ORDERED_Q_TAG))) {
1556                         pSRB->SRBState |= SRB_MSGIN_MULTI;
1557                         pSRB->MsgInBuf[0] = bval;
1558                         pSRB->MsgCnt = 1;
1559                         pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
1560                 } else if (bval == MSG_MESSAGE_REJECT) {
1561                         amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1562
1563                         if (pSRB->SRBState & DO_SYNC_NEGO) {
1564                                 goto set_async;
1565                         }
1566                 } else if (bval == MSG_RESTOREPOINTERS) {
1567                         goto min6;
1568                 } else {
1569                         goto min6;
1570                 }
1571         } else {                /* minx: */
1572                 *pSRB->pMsgPtr = bval;
1573                 pSRB->MsgCnt++;
1574                 pSRB->pMsgPtr++;
1575                 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
1576                  && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
1577                         if (pSRB->MsgCnt == 2) {
1578                                 pSRB->SRBState = 0;
1579                                 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
1580                                 if (pSRB->SRBState & SRB_DISCONNECT) == 0) {
1581                                         pSRB = amd->pTmpSRB;
1582                                         pSRB->SRBState = SRB_UNEXPECT_RESEL;
1583                                         pDCB->pActiveSRB = pSRB;
1584                                         pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
1585                                         EnableMsgOut2(amd, pSRB);
1586                                 } else {
1587                                         if (pDCB->DCBFlag & ABORT_DEV_) {
1588                                                 pSRB->SRBState = SRB_ABORT_SENT;
1589                                                 EnableMsgOut1(amd, pSRB);
1590                                         }
1591                                         pDCB->pActiveSRB = pSRB;
1592                                         pSRB->SRBState = SRB_DATA_XFER;
1593                                 }
1594                         }
1595                 } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
1596                         && (pSRB->MsgCnt == 5)) {
1597                         pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
1598                         if ((pSRB->MsgInBuf[1] != 3)
1599                          || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */
1600                                 pSRB->MsgCnt = 1;
1601                                 pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
1602                                 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1603                         } else if (!(pSRB->MsgInBuf[3])
1604                                 || !(pSRB->MsgInBuf[4])) {
1605                 set_async:      /* set async */
1606
1607                                 pDCB = pSRB->pSRBDCB;
1608                                 /* disable sync & sync nego */
1609                                 pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
1610                                 pDCB->SyncPeriod = 0;
1611                                 pDCB->SyncOffset = 0;
1612
1613                                 pDCB->tinfo.goal.period = 0;
1614                                 pDCB->tinfo.goal.offset = 0;
1615
1616                                 pDCB->tinfo.current.period = 0;
1617                                 pDCB->tinfo.current.offset = 0;
1618                                 pDCB->tinfo.current.width =
1619                                     MSG_EXT_WDTR_BUS_8_BIT;
1620
1621                                 pDCB->CtrlR3 = FAST_CLK; /* non_fast */
1622                                 pDCB->CtrlR4 &= 0x3f;
1623                                 pDCB->CtrlR4 |= EATER_25NS; 
1624                                 goto re_prog;
1625                         } else {/* set sync */
1626
1627                                 pDCB = pSRB->pSRBDCB;
1628                                 /* enable sync & sync nego */
1629                                 pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;
1630
1631                                 /* set sync offset */
1632                                 pDCB->SyncOffset &= 0x0f0;
1633                                 pDCB->SyncOffset |= pSRB->MsgInBuf[4];
1634
1635                                 /* set sync period */
1636                                 pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];
1637
1638                                 wval = (u_int16_t) pSRB->MsgInBuf[3];
1639                                 wval = wval << 2;
1640                                 wval--;
1641                                 wval1 = wval / 25;
1642                                 if ((wval1 * 25) != wval) {
1643                                         wval1++;
1644                                 }
1645                                 bval = FAST_CLK|FAST_SCSI;
1646                                 pDCB->CtrlR4 &= 0x3f;
1647                                 if (wval1 >= 8) {
1648                                         /* Fast SCSI */
1649                                         wval1--;
1650                                         bval = FAST_CLK;
1651                                         pDCB->CtrlR4 |= EATER_25NS;
1652                                 }
1653                                 pDCB->CtrlR3 = bval;
1654                                 pDCB->SyncPeriod = (u_int8_t) wval1;
1655
1656                                 pDCB->tinfo.goal.period =
1657                                     tinfo_sync_period[pDCB->SyncPeriod - 4];
1658                                 pDCB->tinfo.goal.offset = pDCB->SyncOffset;
1659                                 pDCB->tinfo.current.period =
1660                                     tinfo_sync_period[pDCB->SyncPeriod - 4];
1661                                 pDCB->tinfo.current.offset = pDCB->SyncOffset;
1662
1663                                 /*
1664                                  * program SCSI control register
1665                                  */
1666                 re_prog:
1667                                 amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
1668                                 amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
1669                                 amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
1670                                 amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
1671                         }
1672                 }
1673         }
1674 min6:
1675         amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1676         return (SCSI_NOP0);
1677 }
1678 #endif
1679
static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	/* Data-out phase handler: start/continue a DMA write to the target. */
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}
1686
static u_int 
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	/* Data-in phase handler: start/continue a DMA read from the target. */
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}
1693
static void
DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
{
	struct amd_sg *    psgl;
	u_int32_t   lval;

	/*
	 * Common data phase setup for both transfer directions:
	 * program the next scatter/gather segment into the SCSI core
	 * and the DMA engine and kick off the transfer, or pad the
	 * transfer when the S/G list is already exhausted.
	 */
	if (pSRB->SGIndex < pSRB->SGcount) {
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */

		if (!pSRB->SGToBeXferLen) {
			/* Load the current S/G element into the SRB. */
			psgl = pSRB->pSGlist;
			pSRB->SGPhysAddr = psgl->SGXPtr;
			pSRB->SGToBeXferLen = psgl->SGXLen;
		}
		/* 24-bit SCSI-core transfer counter: low/mid/high bytes. */
		lval = pSRB->SGToBeXferLen;
		amd_write8(amd, CTCREG_LOW, lval);
		amd_write8(amd, CTCREG_MID, lval >> 8);
		amd_write8(amd, CURTXTCNTREG, lval >> 16);

		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);

		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);

		pSRB->SRBState = SRB_DATA_XFER;

		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);

		/*
		 * NOTE(review): DMA_IDLE_CMD is deliberately written a
		 * second time before DMA_START_CMD; presumably a chip
		 * quirk workaround -- preserve this exact ordering.
		 */
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */

		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
	} else {		/* xfer pad */
		if (pSRB->SGcount) {
			/* Target asked for more data than we have. */
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		/* Zero the transfer counter and pad the phase. */
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);

		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}
1737
static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	/*
	 * Command phase: drop ATN, flush the FIFO, load the CDB into
	 * the chip (amdsetupcommand) and start the information transfer.
	 */
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}
1750
static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	/*
	 * Status phase: have the chip collect the status byte and the
	 * following command-complete message in a single operation.
	 */
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}
1759
static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	/*
	 * Message-out phase: push the queued outgoing message bytes
	 * into the FIFO.  If nothing is pending, substitute a NOP so
	 * the target still receives a message byte.
	 */
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}
1773
static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	/* Message-in phase: clear the FIFO and clock in the next byte. */
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}
1781
static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	/* Placeholder phase handler: no action required for this phase. */
	return (scsistat);
}
1787
static void
amd_Disconnect(struct amd_softc * amd)
{
	struct	amd_srb *srb;
	int	target;
	int	lun;

	/*
	 * Bus-free handler.  Decide from the active SRB's state whether
	 * this was an invalid reselection, an aborted command, a failed
	 * selection, an expected disconnect, or normal completion, and
	 * clean up accordingly.
	 */
	srb = amd->active_srb;
	amd->active_srb = NULL;
	amd->last_phase = SCSI_BUS_FREE;
	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
	target = amd->cur_target;
	lun = amd->cur_lun;

	if (srb == NULL) {
		/* Invalid reselection */
		amdrunwaiting(amd);
	} else if (srb->SRBState & SRB_ABORT_SENT) {
		/* Clean up and done this srb */
		/* XXX dead, incomplete code kept for reference. */
#if 0
		while (( = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
			/* XXX What about "done'ing" these srbs??? */
			if (pSRB->pSRBDCB == pDCB) {
				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
			}
		}
		amdrunwaiting(amd);
#endif
	} else {
		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
			/*
			 * Bus went free during selection or message-out:
			 * treat it as a selection timeout.
			 */
			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
			goto disc1;
		} else if (srb->SRBState & SRB_DISCONNECT) {
			/*
			 * Expected disconnect: remember untagged SRBs so
			 * a later reselection can be matched back to them.
			 */
			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
				amd->untagged_srbs[target][lun] = srb;
			amdrunwaiting(amd);
		} else if (srb->SRBState & SRB_COMPLETED) {
	disc1:
			srb->SRBState = SRB_FREE;
			SRBdone(amd, srb);
		}
	}
	return;
}
1834
static void
amd_Reselect(struct amd_softc *amd)
{
	struct amd_target_info *tinfo;
	u_int16_t disc_count;

	/*
	 * A target reselected us.  Abandon any selection we were
	 * attempting, decode the reselecting target/lun from the FIFO,
	 * pick up the matching untagged SRB (if any) and restore that
	 * target's negotiated transfer settings.
	 */
	amd_clear_msg_state(amd);
	if (amd->active_srb != NULL) {
		/* Requeue the SRB for our attempted Selection */
		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
		amd->active_srb = NULL;
	}
	/* get ID */
	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
	/* The FIFO byte is a bitmask of our ID and the target's ID. */
	amd->cur_target ^= amd->HostID_Bit;
	/*
	 * NOTE(review): if the FIFO byte held only our own ID bit,
	 * ffs() returns 0 and cur_target becomes -1 -- TODO confirm
	 * the hardware guarantees a valid reselection ID here.
	 */
	amd->cur_target = ffs(amd->cur_target) - 1;
	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
	tinfo = &amd->tinfo[amd->cur_target];
	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
	if (disc_count == 0) {
		/* Nothing of ours is disconnected on that nexus. */
		printf("amd%d: Unexpected reselection for target %d, "
		       "Issuing Abort\n", amd->unit, amd->cur_target);
		amd->msgout_buf[0] = MSG_ABORT;
		amd->msgout_len = 1;
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
	}
	if (amd->active_srb != NULL) {
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
	}

	/* Re-program per-target sync/offset/control settings. */
	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
	amd->last_phase = SCSI_NOP0;
}
1877
static void
SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
{
	u_int8_t   bval, i, status;
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	int        intflag;
	struct amd_sg *ptr2;
	u_int32_t   swlval;

	/*
	 * Final completion processing for an SRB: sync/unload DMA
	 * mappings, translate adapter and SCSI status into a CAM
	 * status, run automatic REQUEST SENSE on check-condition when
	 * allowed, then hand the CCB back to CAM via xpt_done().
	 */
	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));

	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		/* Sync and unload the data buffer's DMA map. */
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
	}

	status = pSRB->TargetStatus;
	pccb->ccb_h.status = CAM_REQ_CMP;
	if (pSRB->SRBFlag & AUTO_REQSENSE) {
		/* This SRB just finished its automatic REQUEST SENSE. */
		pSRB->SRBFlag &= ~AUTO_REQSENSE;
		pSRB->AdaptStatus = 0;
		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;

		if (status == SCSI_STATUS_CHECK_COND) {
			/* The sense command itself failed. */
			pccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			goto ckc_e;
		}
		/* Restore the original CDB saved by RequestSense(). */
		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];

		pcsio->sense_resid = pcsio->sense_len
				   - pSRB->TotalXferredLen;
		pSRB->TotalXferredLen = pSRB->Segment1[1];
		if (pSRB->TotalXferredLen) {
			/* ???? */
			pcsio->resid = pcsio->dxfer_len
				     - pSRB->TotalXferredLen;
			/* The resid field contains valid data   */
			/* Flush resid bytes on complete        */
		} else {
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
		}
		/* Copy the gathered sense data into the CCB. */
		bzero(&pcsio->sense_data, pcsio->sense_len);
		bcopy(amd_get_sense_buf(amd, pSRB), &pcsio->sense_data,
		      pcsio->sense_len);
		pccb->ccb_h.status = CAM_AUTOSNS_VALID;
		goto ckc_e;
	}
	if (status) {
		/* Non-good SCSI status: map it to a CAM status. */
		if (status == SCSI_STATUS_CHECK_COND) {

			if ((pSRB->SGIndex < pSRB->SGcount)
			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
				/* Sum untransferred S/G bytes as the resid. */
				bval = pSRB->SGcount;
				swlval = pSRB->SGToBeXferLen;
				ptr2 = pSRB->pSGlist;
				ptr2++;
				for (i = pSRB->SGIndex + 1; i < bval; i++) {
					swlval += ptr2->SGXLen;
					ptr2++;
				}
				/* ??????? */
				pcsio->resid = (u_int32_t) swlval;

#ifdef	AMD_DEBUG0
				printf("XferredLen=%8x,NotYetXferLen=%8x,",
					pSRB->TotalXferredLen, swlval);
#endif
			}
			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
#ifdef	AMD_DEBUG0
				printf("RequestSense..................\n");
#endif
				/* Re-use this SRB for an automatic sense. */
				RequestSense(amd, pSRB);
				return;
			}
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == SCSI_STATUS_QUEUE_FULL) {
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
			pSRB->AdaptStatus = H_SEL_TIMEOUT;
			pSRB->TargetStatus = 0;

			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
		} else if (status == SCSI_STATUS_BUSY) {
#ifdef AMD_DEBUG0
			printf("DC390: target busy at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_BUSY;
			pccb->ccb_h.status = CAM_SCSI_BUSY;
		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
#ifdef AMD_DEBUG0
			printf("DC390: target reserved at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
		} else {
			pSRB->AdaptStatus = 0;
#ifdef AMD_DEBUG0
			printf("DC390: driver stuffup at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		}
	} else {
		/* Good SCSI status: check for adapter-detected errors. */
		status = pSRB->AdaptStatus;
		if (status & H_OVER_UNDER_RUN) {
			pSRB->TargetStatus = 0;

			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
		} else if (pSRB->SRBStatus & PARITY_ERROR) {
#ifdef AMD_DEBUG0
			printf("DC390: driver stuffup %s %d\n",
			       __FILE__, __LINE__);
#endif
			/* Driver failed to perform operation     */
			pccb->ccb_h.status = CAM_UNCOR_PARITY;
		} else {	/* No error */
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->resid = 0;
			/* there is no error, (sense is invalid)  */
		}
	}
ckc_e:
	intflag = splcam();
	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* CAM request not yet complete =>device_Q frozen */
		xpt_freeze_devq(pccb->ccb_h.path, 1);
		pccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	/* Recycle the SRB and start queued work before completing. */
	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
	amdrunwaiting(amd);
	splx(intflag);
	xpt_done(pccb);

}
2035
2036 static void
2037 amd_ResetSCSIBus(struct amd_softc * amd)
2038 {
2039         int     intflag;
2040
2041         intflag = splcam();
2042         amd->ACBFlag |= RESET_DEV;
2043         amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2044         amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2045         splx(intflag);
2046         return;
2047 }
2048
2049 static void
2050 amd_ScsiRstDetect(struct amd_softc * amd)
2051 {
2052         int     intflag;
2053         u_int32_t   wlval;
2054
2055 #ifdef AMD_DEBUG0
2056         printf("amd_ScsiRstDetect \n");
2057 #endif
2058
2059         wlval = 1000;
2060         while (--wlval) {       /* delay 1 sec */
2061                 DELAY(1000);
2062         }
2063         intflag = splcam();
2064
2065         amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2066         amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2067
2068         if (amd->ACBFlag & RESET_DEV) {
2069                 amd->ACBFlag |= RESET_DONE;
2070         } else {
2071                 amd->ACBFlag |= RESET_DETECT;
2072                 ResetDevParam(amd);
2073                 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2074                                  AMD_TAG_WILDCARD, &amd->running_srbs,
2075                                  CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2076                 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2077                                  AMD_TAG_WILDCARD, &amd->waiting_srbs,
2078                                  CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2079                 amd->active_srb = NULL;
2080                 amd->ACBFlag = 0;
2081                 amdrunwaiting(amd);
2082         }
2083         splx(intflag);
2084         return;
2085 }
2086
static void
RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;

	/*
	 * Convert this SRB in place into an automatic REQUEST SENSE
	 * command.  The original CDB and transfer bookkeeping are
	 * stashed in Segment0/Segment1 and restored by SRBdone().
	 */
	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	pSRB->SRBFlag |= AUTO_REQSENSE;
	/* Save the 8 CDB bytes plus CDB length / S/G count / xfer count. */
	pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
	pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
	pSRB->Segment1[1] = pSRB->TotalXferredLen;

	pSRB->AdaptStatus = 0;
	pSRB->TargetStatus = 0;

	/* Point a one-element S/G list at the pre-allocated sense buffer. */
	pSRB->Segmentx.SGXPtr = amd_get_sense_bufaddr(amd, pSRB);
	pSRB->Segmentx.SGXLen = amd_get_sense_bufsize(amd, pSRB);

	pSRB->pSGlist = &pSRB->Segmentx;
	pSRB->SGcount = 1;
	pSRB->SGIndex = 0;

	/* Build the 6-byte REQUEST SENSE CDB. */
	pSRB->CmdBlock[0] = REQUEST_SENSE;
	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
	pSRB->CmdBlock[2] = 0;
	pSRB->CmdBlock[3] = 0;
	pSRB->CmdBlock[4] = pcsio->sense_len;
	pSRB->CmdBlock[5] = 0;
	pSRB->ScsiCmdLen = 6;

	pSRB->TotalXferredLen = 0;
	pSRB->SGToBeXferLen = 0;
	if (amdstart(amd, pSRB) != 0) {
		/* Bus busy right now: queue the sense command to run later. */
		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
	}
}
2127
2128 static void
2129 amd_InvalidCmd(struct amd_softc * amd)
2130 {
2131         struct amd_srb *srb;
2132
2133         srb = amd->active_srb;
2134         if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2135                 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2136 }
2137
2138 void 
2139 amd_linkSRB(struct amd_softc *amd)
2140 {
2141         u_int16_t  count, i;
2142         struct amd_srb *psrb;
2143         int error;
2144
2145         count = amd->SRBCount;
2146
2147         for (i = 0; i < count; i++) {
2148                 psrb = (struct amd_srb *)&amd->SRB_array[i];
2149                 psrb->TagNumber = i;
2150
2151                 /*
2152                  * Create the dmamap.  This is no longer optional!
2153                  *
2154                  * XXX Since there is no detach method in this driver,
2155                  * this does not get freed!
2156                  */
2157                 if ((error = bus_dmamap_create(amd->buffer_dmat, 0,
2158                                                &psrb->dmamap)) != 0) {
2159                         device_printf(amd->dev, "Error %d creating buffer "
2160                                       "dmamap!\n", error);
2161                         return;
2162                 }
2163                 TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2164         }
2165 }
2166
2167 static void
2168 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2169 {
2170         if (mode == ENABLE_CE) {
2171                 *regval = 0xc0;
2172         } else {
2173                 *regval = 0x80;
2174         }
2175         pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2176         if (mode == DISABLE_CE) {
2177                 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2178         }
2179         DELAY(160);
2180 }
2181
static void
amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
{
	u_int bval;

	/*
	 * Bit-bang a single data bit out to the serial EEPROM through
	 * PCI config space, pulsing the clock line around it.
	 *
	 * NOTE(review): when Carry is 0, *regval keeps whatever value
	 * the caller last set (0x80 or 0xc0) -- behavior preserved;
	 * confirm against the DC-390 EEPROM access sequence.
	 */
	bval = 0;
	if (Carry) {
		bval = 0x40;	/* data bit */
		*regval = 0x80;
		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	}
	DELAY(160);
	bval |= 0x80;	/* raise the clock bit */
	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	DELAY(160);
}
2200
2201 static int
2202 amd_EEpromInDO(struct amd_softc *amd)
2203 {
2204         pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2205         DELAY(160);
2206         pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2207         DELAY(160);
2208         if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2209                 return (1);
2210         return (0);
2211 }
2212
2213 static u_int16_t
2214 EEpromGetData1(struct amd_softc *amd)
2215 {
2216         u_int     i;
2217         u_int     carryFlag;
2218         u_int16_t wval;
2219
2220         wval = 0;
2221         for (i = 0; i < 16; i++) {
2222                 wval <<= 1;
2223                 carryFlag = amd_EEpromInDO(amd);
2224                 wval |= carryFlag;
2225         }
2226         return (wval);
2227 }
2228
2229 static void
2230 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2231 {
2232         u_int i, j;
2233         int carryFlag;
2234
2235         carryFlag = 1;
2236         j = 0x80;
2237         for (i = 0; i < 9; i++) {
2238                 amd_EEpromOutDI(amd, regval, carryFlag);
2239                 carryFlag = (EEpromCmd & j) ? 1 : 0;
2240                 j >>= 1;
2241         }
2242 }
2243
2244 static void
2245 amd_ReadEEprom(struct amd_softc *amd)
2246 {
2247         int        regval;
2248         u_int      i;
2249         u_int16_t *ptr;
2250         u_int8_t   cmd;
2251
2252         ptr = (u_int16_t *)&amd->eepromBuf[0];
2253         cmd = EEPROM_READ;
2254         for (i = 0; i < 0x40; i++) {
2255                 amd_EnDisableCE(amd, ENABLE_CE, &regval);
2256                 amd_Prepare(amd, &regval, cmd);
2257                 *ptr = EEpromGetData1(amd);
2258                 ptr++;
2259                 cmd++;
2260                 amd_EnDisableCE(amd, DISABLE_CE, &regval);
2261         }
2262 }
2263
2264 static void
2265 amd_load_defaults(struct amd_softc *amd)
2266 {
2267         int target;
2268
2269         bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2270         for (target = 0; target < MAX_SCSI_ID; target++)
2271                 amd->eepromBuf[target << 2] =
2272                     (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2273         amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2274         amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2275         amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2276 }
2277
2278 static void
2279 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2280 {
2281         u_int16_t  wval, *ptr;
2282         u_int8_t   i;
2283
2284         amd_ReadEEprom(amd);
2285         wval = 0;
2286         ptr = (u_int16_t *) & amd->eepromBuf[0];
2287         for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2288                 wval += *ptr;
2289
2290         if (wval != EE_CHECKSUM) {
2291                 if (bootverbose)
2292                         printf("amd%d: SEEPROM data unavailable.  "
2293                                "Using default device parameters.\n",
2294                                amd->unit);
2295                 amd_load_defaults(amd);
2296         }
2297 }
2298
/*
 **********************************************************************
 * Function      : static int amd_init (device_t dev)
 * Purpose       : initialize the internal structures for a given SCSI host
 * Inputs        : dev - the newbus device handle for this adapter
 **********************************************************************
 */
2306 static int
2307 amd_init(device_t dev)
2308 {
2309         struct amd_softc *amd = device_get_softc(dev);
2310         struct resource *iores;
2311         int     i, rid;
2312         u_int   bval;
2313
2314         rid = PCI_BASE_ADDR0;
2315         iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
2316         if (iores == NULL) {
2317                 if (bootverbose)
2318                         printf("amd_init: bus_alloc_resource failure!\n");
2319                 return ENXIO;
2320         }
2321         amd->tag = rman_get_bustag(iores);
2322         amd->bsh = rman_get_bushandle(iores);
2323
2324         /* DMA tag for mapping buffers into device visible space. */
2325         if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2326                                /*boundary*/0,
2327                                /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2328                                /*highaddr*/BUS_SPACE_MAXADDR,
2329                                /*filter*/NULL, /*filterarg*/NULL,
2330                                /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2331                                /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2332                                /*flags*/BUS_DMA_ALLOCNOW,
2333                                /*lockfunc*/busdma_lock_mutex,
2334                                /*lockarg*/&Giant,
2335                                &amd->buffer_dmat) != 0) {
2336                 if (bootverbose)
2337                         printf("amd_init: bus_dma_tag_create failure!\n");
2338                 return ENXIO;
2339         }
2340
2341         /* Create, allocate, and map DMA buffers for autosense data */
2342         if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2343                                /*boundary*/0,
2344                                /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2345                                /*highaddr*/BUS_SPACE_MAXADDR,
2346                                /*filter*/NULL, /*filterarg*/NULL,
2347                                sizeof(struct scsi_sense_data) * MAX_SRB_CNT,
2348                                /*nsegments*/1,
2349                                /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2350                                /*flags*/0,
2351                                /*lockfunc*/busdma_lock_mutex,
2352                                /*lockarg*/&Giant, &amd->sense_dmat) != 0) {
2353                 if (bootverbose)
2354                         device_printf(dev, "cannot create sense buffer dmat\n");
2355                 return (ENXIO);
2356         }
2357
2358         if (bus_dmamem_alloc(amd->sense_dmat, (void **)&amd->sense_buffers,
2359                              BUS_DMA_NOWAIT, &amd->sense_dmamap) != 0)
2360                 return (ENOMEM);
2361
2362         bus_dmamap_load(amd->sense_dmat, amd->sense_dmamap,
2363                        amd->sense_buffers,
2364                        sizeof(struct scsi_sense_data) * MAX_SRB_CNT,
2365                        amd_dmamap_cb, &amd->sense_busaddr, /*flags*/0);
2366
2367         TAILQ_INIT(&amd->free_srbs);
2368         TAILQ_INIT(&amd->running_srbs);
2369         TAILQ_INIT(&amd->waiting_srbs);
2370         amd->last_phase = SCSI_BUS_FREE;
2371         amd->dev = dev;
2372         amd->unit = device_get_unit(dev);
2373         amd->SRBCount = MAX_SRB_CNT;
2374         amd->status = 0;
2375         amd_load_eeprom_or_defaults(amd);
2376         amd->max_id = 7;
2377         if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2378                 amd->max_lun = 7;
2379         } else {
2380                 amd->max_lun = 0;
2381         }
2382         amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2383         amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2384         amd->AdaptSCSILUN = 0;
2385         /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2386         amd->ACBFlag = 0;
2387         amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2388         amd_linkSRB(amd);
2389         for (i = 0; i <= amd->max_id; i++) {
2390
2391                 if (amd->AdaptSCSIID != i) {
2392                         struct amd_target_info *tinfo;
2393                         PEEprom prom;
2394
2395                         tinfo = &amd->tinfo[i];
2396                         prom = (PEEprom)&amd->eepromBuf[i << 2];
2397                         if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2398                                 tinfo->disc_tag |= AMD_USR_DISCENB;
2399                                 if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2400                                         tinfo->disc_tag |= AMD_USR_TAGENB;
2401                         }
2402                         if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2403                                 tinfo->user.period =
2404                                     eeprom_period[prom->EE_SPEED];
2405                                 tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2406                         }
2407                         tinfo->CtrlR1 = amd->AdaptSCSIID;
2408                         if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2409                                 tinfo->CtrlR1 |= PARITY_ERR_REPO;
2410                         tinfo->CtrlR3 = FAST_CLK;
2411                         tinfo->CtrlR4 = EATER_25NS;
2412                         if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2413                                 tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2414                 }
2415         }
2416         amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2417         /* Conversion factor = 0 , 40MHz clock */
2418         amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2419         /* NOP cmd - clear command register */
2420         amd_write8(amd, SCSICMDREG, NOP_CMD);   
2421         amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2422         amd_write8(amd, CNTLREG3, FAST_CLK);
2423         bval = EATER_25NS;
2424         if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2425                 bval |= NEGATE_REQACKDATA;
2426         }
2427         amd_write8(amd, CNTLREG4, bval);
2428
2429         /* Disable SCSI bus reset interrupt */
2430         amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2431
2432         return 0;
2433 }
2434
2435 /*
2436  * attach and init a host adapter
2437  */
2438 static int
2439 amd_attach(device_t dev)
2440 {
2441         struct cam_devq *devq;  /* Device Queue to use for this SIM */
2442         u_int8_t        intstat;
2443         struct amd_softc *amd = device_get_softc(dev);
2444         int             unit = device_get_unit(dev);
2445         int             rid;
2446         void            *ih;
2447         struct resource *irqres;
2448
2449         if (amd_init(dev)) {
2450                 if (bootverbose)
2451                         printf("amd_attach: amd_init failure!\n");
2452                 return ENXIO;
2453         }
2454
2455         /* Reset Pending INT */
2456         intstat = amd_read8(amd, INTSTATREG);
2457
2458         /* After setting up the adapter, map our interrupt */
2459         rid = 0;
2460         irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2461                                         RF_SHAREABLE | RF_ACTIVE);
2462         if (irqres == NULL ||
2463             bus_setup_intr(dev, irqres, INTR_TYPE_CAM | INTR_ENTROPY,
2464             NULL, amd_intr, amd, &ih)) {
2465                 if (bootverbose)
2466                         printf("amd%d: unable to register interrupt handler!\n",
2467                                unit);
2468                 return ENXIO;
2469         }
2470
2471         /*
2472          * Now let the CAM generic SCSI layer find the SCSI devices on
2473          * the bus *  start queue to reset to the idle loop. *
2474          * Create device queue of SIM(s) *  (MAX_START_JOB - 1) :
2475          * max_sim_transactions
2476          */
2477         devq = cam_simq_alloc(MAX_START_JOB);
2478         if (devq == NULL) {
2479                 if (bootverbose)
2480                         printf("amd_attach: cam_simq_alloc failure!\n");
2481                 return ENXIO;
2482         }
2483
2484         amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2485                                   amd, amd->unit, &Giant,
2486                                   1, MAX_TAGS_CMD_QUEUE, devq);
2487         if (amd->psim == NULL) {
2488                 cam_simq_free(devq);
2489                 if (bootverbose)
2490                         printf("amd_attach: cam_sim_alloc failure!\n");
2491                 return ENXIO;
2492         }
2493
2494         if (xpt_bus_register(amd->psim, dev, 0) != CAM_SUCCESS) {
2495                 cam_sim_free(amd->psim, /*free_devq*/TRUE);
2496                 if (bootverbose)
2497                         printf("amd_attach: xpt_bus_register failure!\n");
2498                 return ENXIO;
2499         }
2500
2501         if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2502                             cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2503                             CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2504                 xpt_bus_deregister(cam_sim_path(amd->psim));
2505                 cam_sim_free(amd->psim, /* free_simq */ TRUE);
2506                 if (bootverbose)
2507                         printf("amd_attach: xpt_create_path failure!\n");
2508                 return ENXIO;
2509         }
2510
2511         return 0;
2512 }
2513
2514 static int
2515 amd_probe(device_t dev)
2516 {
2517         if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2518                 device_set_desc(dev,
2519                         "Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2520                 return BUS_PROBE_DEFAULT;
2521         }
2522         return ENXIO;
2523 }
2524
/*
 * Newbus device interface glue.  NOTE(review): no device_detach or
 * device_shutdown methods are provided, so the driver cannot release
 * its resources once attached — confirm this is intentional.
 */
static device_method_t amd_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         amd_probe),
        DEVMETHOD(device_attach,        amd_attach),
        { 0, 0 }
};
2531
/* Driver description: name, method table, and per-instance softc size. */
static driver_t amd_driver = {
        "amd", amd_methods, sizeof(struct amd_softc)
};

static devclass_t amd_devclass;
/* Register the driver on the PCI bus and declare module dependencies. */
DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);
MODULE_DEPEND(amd, pci, 1, 1, 1);
MODULE_DEPEND(amd, cam, 1, 1, 1);