/*-
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>
struct adv_quirk_entry {
        struct scsi_inquiry_pattern inq_pat;
        u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS  0x01
#define ADV_QUIRK_FIX_ASYN_XFER         0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
        {
                { T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
                ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
        },
        {
                { T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
                0
        },
        {
                {
                  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
                  "TANDBERG", " TDC 36", "*"
                },
                0
        },
        {
                { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
                0
        },
        {
                {
                  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  "*", "*", "*"
                },
                0
        },
        {
                {
                  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  "*", "*", "*"
                },
                0
        },
        {
                /* Default quirk entry */
                {
                  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
                  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                ADV_QUIRK_FIX_ASYN_XFER,
        }
};

/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
        25,
        30,
        35,
        40,
        50,
        60,
        70,
        85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
        12,
        19,
        25,
        32,
        38,
        44,
        50,
        57,
        63,
        69,
        75,
        82,
        88,
        94,
        100,
        107
};
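
/*
 * SDTR "sync data" bytes exchanged with the microcode pack an index
 * into the active period table above in their high nibble and the
 * REQ/ACK offset in their low nibble (see adv_period_offset_to_sdtr()
 * and adv_sdtr_to_period_offset() below), which is why the ultra
 * table tops out at 16 entries.
 */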

struct ext_msg {
        u_int8_t msg_type;
        u_int8_t msg_len;
        u_int8_t msg_req;
        union {
                struct {
                        u_int8_t sdtr_xfer_period;
                        u_int8_t sdtr_req_ack_offset;
                } sdtr;
                struct {
                        u_int8_t wdtr_width;
                } wdtr;
                struct {
                        u_int8_t mdp[4];
                } mdp;
        } u_ext_msg;
        u_int8_t res;
};

#define xfer_period     u_ext_msg.sdtr.sdtr_xfer_period
#define req_ack_offset  u_ext_msg.sdtr.sdtr_req_ack_offset
#define wdtr_width      u_ext_msg.wdtr.wdtr_width
/* The MDP argument bytes are carried most significant byte first. */
#define mdp_b3          u_ext_msg.mdp.mdp[0]
#define mdp_b2          u_ext_msg.mdp.mdp[1]
#define mdp_b1          u_ext_msg.mdp.mdp[2]
#define mdp_b0          u_ext_msg.mdp.mdp[3]

/*
 * Some of the early PCI adapters have problems with async transfers.
 * For those boards we use an SDTR offset of 1 instead.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
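
/*
 * Decoded with the nibble scheme above, 0x41 selects entry 4 of the
 * period table with a REQ/ACK offset of 1: a slow synchronous transfer
 * standing in for a true async one on the affected boards.
 */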

/* LRAM routines */
static void      adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
                                        u_int16_t *buffer, int count);
static void      adv_write_lram_16_multi(struct adv_softc *adv,
                                         u_int16_t s_addr, u_int16_t *buffer,
                                         int count);
static void      adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
                                  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
                                  int count);

static int       adv_write_and_verify_lram_16(struct adv_softc *adv,
                                              u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void      adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
                                   u_int32_t value);
static void      adv_write_lram_32_multi(struct adv_softc *adv,
                                         u_int16_t s_addr, u_int32_t *buffer,
                                         int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
                                     u_int16_t value);
static int       adv_write_eeprom_cmd_reg(struct adv_softc *adv,
                                          u_int8_t cmd_reg);
static int       adv_set_eeprom_config_once(struct adv_softc *adv,
                                            struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
                                    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void      adv_reinit_lram(struct adv_softc *adv);
static void      adv_init_lram(struct adv_softc *adv);
static int       adv_init_microcode_var(struct adv_softc *adv);
static void      adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void      adv_disable_interrupt(struct adv_softc *adv);
static void      adv_enable_interrupt(struct adv_softc *adv);
static void      adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int       adv_host_req_chip_halt(struct adv_softc *adv);
static void      adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if 0
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int
                 adv_sgcount_to_qcount(int sgcount);

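/*
 * One queue always carries the request (and its first S/G segment);
 * each additional queue carries up to ADV_SG_LIST_PER_Q more segments.
 * For example, a single-segment transfer needs 1 queue, and one with
 * ADV_SG_LIST_PER_Q + 1 segments needs 2.
 */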
static __inline int
adv_sgcount_to_qcount(int sgcount)
{
        int     n_sg_list_qs;

        n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
        if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
                n_sg_list_qs++;
        return (n_sg_list_qs + 1);
}

#if BYTE_ORDER == BIG_ENDIAN
static void      adv_adj_endian_qdone_info(struct adv_q_done_info *);
static void      adv_adj_scsiq_endian(struct adv_scsi_q *);
#endif
static void      adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
                                u_int16_t *inbuf, int words);
static u_int     adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
                                       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
                                      u_int8_t free_q_head);
static int       adv_send_scsi_queue(struct adv_softc *adv,
                                     struct adv_scsi_q *scsiq,
                                     u_int8_t n_q_required);
static void      adv_put_ready_sg_list_queue(struct adv_softc *adv,
                                             struct adv_scsi_q *scsiq,
                                             u_int q_no);
static void      adv_put_ready_queue(struct adv_softc *adv,
                                     struct adv_scsi_q *scsiq, u_int q_no);
static void      adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
                               u_int16_t *buffer, int words);

/* Messages */
static void      adv_handle_extmsg_in(struct adv_softc *adv,
                                      u_int16_t halt_q_addr, u_int8_t q_cntl,
                                      target_bit_vector target_id,
                                      int tid);
static void      adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
                                 u_int8_t sdtr_offset);
static void      adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
                                        u_int8_t sdtr_data);

/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
        struct adv_softc *adv;

        adv = (struct adv_softc *)callback_arg;
        mtx_assert(&adv->lock, MA_OWNED);
        switch (code) {
        case AC_FOUND_DEVICE:
        {
                struct ccb_getdev *cgd;
                target_bit_vector target_mask;
                int num_entries;
                caddr_t match;
                struct adv_quirk_entry *entry;
                struct adv_target_transinfo* tinfo;

                cgd = (struct ccb_getdev *)arg;

                target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

                num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
                match = cam_quirkmatch((caddr_t)&cgd->inq_data,
                                       (caddr_t)adv_quirk_table,
                                       num_entries, sizeof(*adv_quirk_table),
                                       scsi_inquiry_match);

                if (match == NULL)
                        panic("advasync: device didn't match wildcard entry!!");

                entry = (struct adv_quirk_entry *)match;

                if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
                        if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS) != 0)
                                adv->fix_asyn_xfer_always |= target_mask;
                        else
                                adv->fix_asyn_xfer_always &= ~target_mask;
                        /*
                         * We start out life with all bits set and clear them
                         * after we've determined that the fix isn't necessary.
                         * It may well be that we've already cleared a target
                         * before the full inquiry session completes, so don't
                         * gratuitously set a target bit even if it has this
                         * quirk.  But, if the quirk exonerates a device, clear
                         * the bit now.
                         */
                        if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
                                adv->fix_asyn_xfer &= ~target_mask;
                }
                /*
                 * Reset our sync settings now that we've determined
                 * what quirks are in effect for the device.
                 */
                tinfo = &adv->tinfo[cgd->ccb_h.target_id];
                adv_set_syncrate(adv, cgd->ccb_h.path,
                                 cgd->ccb_h.target_id,
                                 tinfo->current.period,
                                 tinfo->current.offset,
                                 ADV_TRANS_CUR);
                break;
        }
        case AC_LOST_DEVICE:
        {
                u_int target_mask;

                if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
                        target_mask = 0x01 << xpt_path_target_id(path);
                        adv->fix_asyn_xfer |= target_mask;
                }

                /*
                 * Revert to async transfers
                 * for the next device.
                 */
                adv_set_syncrate(adv, /*path*/NULL,
                                 xpt_path_target_id(path),
                                 /*period*/0,
                                 /*offset*/0,
                                 ADV_TRANS_GOAL|ADV_TRANS_CUR);
                break;
        }
        default:
                break;
        }
}

void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
        u_int8_t control;

        /*
         * Start out with the bank reset to 0
         */
        control = ADV_INB(adv, ADV_CHIP_CTRL)
                  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
                        | ADV_CC_DIAG | ADV_CC_SCSI_RESET
                        | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
        if (bank == 1) {
                control |= ADV_CC_BANK_ONE;
        } else if (bank == 2) {
                control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
        }
        ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
        u_int8_t   byte_data;
        u_int16_t  word_data;

        /*
         * LRAM is accessed on 16bit boundaries.
         */
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
        word_data = ADV_INW(adv, ADV_LRAM_DATA);
        if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
                byte_data = (u_int8_t)(word_data & 0xFF);
#else
                byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
        } else {
#if BYTE_ORDER == BIG_ENDIAN
                byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
                byte_data = (u_int8_t)(word_data & 0xFF);
#endif
        }
        return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
        u_int16_t word_data;

        word_data = adv_read_lram_16(adv, addr & 0xFFFE);
        if (addr & 1) {
                word_data &= 0x00FF;
                word_data |= (((u_int8_t)value << 8) & 0xFF00);
        } else {
                word_data &= 0xFF00;
                word_data |= ((u_int8_t)value & 0x00FF);
        }
        adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}

u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        ADV_OUTW(adv, ADV_LRAM_DATA, value);
}

/*
 * Determine if there is a board behind the given register resource
 * by looking for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(struct resource *res)
{
        u_int16_t signature;

        if (bus_read_1(res, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
                signature = bus_read_2(res, ADV_SIGNATURE_WORD);
                if ((signature == ADV_1000_ID0W)
                 || (signature == ADV_1000_ID0W_FIX))
                        return (1);
        }
        return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
        if ((adv->type & ADV_ULTRA) != 0) {
                adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
                adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
        } else {
                adv->sdtr_period_tbl = adv_sdtr_period_tbl;
                adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
        }
}

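/*
 * Read the EEPROM image into *eeprom_config and return the 16-bit sum
 * of the config words.  The final word read (the stored checksum) is
 * copied into the buffer but deliberately left out of the running sum,
 * presumably so the caller can compare the two.
 */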
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
                      struct adv_eeprom_config *eeprom_config)
{
        u_int16_t       sum;
        u_int16_t       *wbuf;
        u_int8_t        cfg_beg;
        u_int8_t        cfg_end;
        u_int8_t        s_addr;

        wbuf = (u_int16_t *)eeprom_config;
        sum = 0;

        for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
                *wbuf = adv_read_eeprom_16(adv, s_addr);
                sum += *wbuf;
        }

        if (adv->type & ADV_VL) {
                cfg_beg = ADV_EEPROM_CFG_BEG_VL;
                cfg_end = ADV_EEPROM_MAX_ADDR_VL;
        } else {
                cfg_beg = ADV_EEPROM_CFG_BEG;
                cfg_end = ADV_EEPROM_MAX_ADDR;
        }

        for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
                *wbuf = adv_read_eeprom_16(adv, s_addr);
                sum += *wbuf;
#ifdef ADV_DEBUG_EEPROM
                printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
        }
        *wbuf = adv_read_eeprom_16(adv, s_addr);
        return (sum);
}

int
adv_set_eeprom_config(struct adv_softc *adv,
                      struct adv_eeprom_config *eeprom_config)
{
        int     retry;

        retry = 0;
        while (1) {
                if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
                        break;
                }
                if (++retry > ADV_EEPROM_MAX_RETRY) {
                        break;
                }
        }
        return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
        adv_stop_chip(adv);
        ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
                                     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
        DELAY(60);

        adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
        adv_set_chip_ih(adv, ADV_INS_HALT);

        if (reset_bus)
                ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

        ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
        if (reset_bus)
                DELAY(200 * 1000);

        ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
        ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
        return (adv_is_chip_halted(adv));
}

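/*
 * Probe for external LRAM by writing an alternating bit pattern
 * (0x55AA) to the queue area at queue number 241, which is assumed
 * here to lie beyond what the internal RAM provides, and reading it
 * back.  The original contents are restored on success.
 */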
int
adv_test_external_lram(struct adv_softc* adv)
{
        u_int16_t       q_addr;
        u_int16_t       saved_value;
        int             success;

        success = 0;

        q_addr = ADV_QNO_TO_QADDR(241);
        saved_value = adv_read_lram_16(adv, q_addr);
        if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
                success = 1;
                adv_write_lram_16(adv, q_addr, saved_value);
        }
        return (success);
}

int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
        u_int32_t       retval;

        adv_disable_interrupt(adv);

        adv_init_lram(adv);

        retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
                                    adv_mcode_size);
        if (retval != adv_mcode_chksum) {
                device_printf(adv->dev,
                    "Microcode download failed checksum!\n");
                return (1);
        }

        if (adv_init_microcode_var(adv) != 0)
                return (1);

        adv_enable_interrupt(adv);
        return (0);
}

u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
        u_int16_t       cfg_lsw;
        u_int8_t        chip_irq;

        cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

        if ((adv->type & ADV_VL) != 0) {
                chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
                if ((chip_irq == 0) ||
                    (chip_irq == 4) ||
                    (chip_irq == 7)) {
                        return (0);
                }
                return (chip_irq + (ADV_MIN_IRQ_NO - 1));
        }
        chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
        if (chip_irq == 3)
                chip_irq += 2;
        return (chip_irq + ADV_MIN_IRQ_NO);
}

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
        u_int16_t       cfg_lsw;

        if ((adv->type & ADV_VL) != 0) {
                if (irq_no != 0) {
                        if ((irq_no < ADV_MIN_IRQ_NO)
                         || (irq_no > ADV_MAX_IRQ_NO)) {
                                irq_no = 0;
                        } else {
                                irq_no -= ADV_MIN_IRQ_NO - 1;
                        }
                }
                cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
                cfg_lsw |= 0x0010;
                ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
                adv_toggle_irq_act(adv);

                cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
                cfg_lsw |= (irq_no & 0x07) << 2;
                ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
                adv_toggle_irq_act(adv);
        } else if ((adv->type & ADV_ISA) != 0) {
                if (irq_no == 15)
                        irq_no -= 2;
                irq_no -= ADV_MIN_IRQ_NO;
                cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
                cfg_lsw |= (irq_no & 0x03) << 2;
                ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
        }
        return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
        u_int16_t cfg_lsw;

        cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
        if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
                return;
        cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
        cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
        ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                       u_int32_t datalen)
{
        struct          adv_target_transinfo* tinfo;
        u_int32_t       *p_data_addr;
        u_int32_t       *p_data_bcount;
        int             disable_syn_offset_one_fix;
        int             retval;
        u_int           n_q_required;
        u_int32_t       addr;
        u_int8_t        sg_entry_cnt;
        u_int8_t        target_ix;
        u_int8_t        sg_entry_cnt_minus_one;
        u_int8_t        tid_no;

        if (!dumping)
                mtx_assert(&adv->lock, MA_OWNED);
        scsiq->q1.q_no = 0;
        retval = 1;  /* Default to error case */
        target_ix = scsiq->q2.target_ix;
        tid_no = ADV_TIX_TO_TID(target_ix);
        tinfo = &adv->tinfo[tid_no];

        if (scsiq->cdbptr[0] == REQUEST_SENSE) {
                /* Renegotiate if appropriate. */
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 tid_no, /*period*/0, /*offset*/0,
                                 ADV_TRANS_CUR);
                if (tinfo->current.period != tinfo->goal.period) {
                        adv_msgout_sdtr(adv, tinfo->goal.period,
                                        tinfo->goal.offset);
                        scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
                }
        }

        if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
                sg_entry_cnt = scsiq->sg_head->entry_cnt;
                sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
                if (sg_entry_cnt <= 1)
                        panic("adv_execute_scsi_queue: Queue "
                              "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

                if (sg_entry_cnt > ADV_MAX_SG_LIST)
                        panic("adv_execute_scsi_queue: "
                              "Queue with too many segs.");

                if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
                        int i;

                        for (i = 0; i < sg_entry_cnt_minus_one; i++) {
                                addr = scsiq->sg_head->sg_list[i].addr +
                                       scsiq->sg_head->sg_list[i].bytes;

                                if ((addr & 0x0003) != 0)
                                        panic("adv_execute_scsi_queue: SG "
                                              "with odd address or byte count");
                        }
                }
#endif
                p_data_addr =
                    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
                p_data_bcount =
                    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

                n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
                scsiq->sg_head->queue_cnt = n_q_required - 1;
        } else {
                p_data_addr = &scsiq->q1.data_addr;
                p_data_bcount = &scsiq->q1.data_cnt;
                n_q_required = 1;
        }

        disable_syn_offset_one_fix = FALSE;

        if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
         && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {
                if (datalen != 0) {
                        if (datalen < 512) {
                                disable_syn_offset_one_fix = TRUE;
                        } else {
                                if (scsiq->cdbptr[0] == INQUIRY
                                 || scsiq->cdbptr[0] == REQUEST_SENSE
                                 || scsiq->cdbptr[0] == READ_CAPACITY
                                 || scsiq->cdbptr[0] == MODE_SELECT_6
                                 || scsiq->cdbptr[0] == MODE_SENSE_6
                                 || scsiq->cdbptr[0] == MODE_SENSE_10
                                 || scsiq->cdbptr[0] == MODE_SELECT_10
                                 || scsiq->cdbptr[0] == READ_TOC) {
                                        disable_syn_offset_one_fix = TRUE;
                                }
                        }
                }
        }

        if (disable_syn_offset_one_fix) {
                scsiq->q2.tag_code &=
                    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
                scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
                                     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
        }

        if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
         && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
                u_int8_t extra_bytes;

                addr = *p_data_addr + *p_data_bcount;
                extra_bytes = addr & 0x0003;
                if (extra_bytes != 0
                 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
                  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
                        scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
                        scsiq->q1.extra_bytes = extra_bytes;
                        *p_data_bcount -= extra_bytes;
                }
        }

        if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
         || ((scsiq->q1.cntl & QC_URGENT) != 0))
                retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

        return (retval);
}

u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
                    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
        u_int16_t val;
        u_int8_t  sg_queue_cnt;

        adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
                       (u_int16_t *)scsiq,
                       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
        adv_adj_endian_qdone_info(scsiq);
#endif

        val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
        scsiq->q_status = val & 0xFF;
        scsiq->q_no = (val >> 8) & 0xFF;

        val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
        scsiq->cntl = val & 0xFF;
        sg_queue_cnt = (val >> 8) & 0xFF;

        val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
        scsiq->sense_len = val & 0xFF;
        scsiq->extra_bytes = (val >> 8) & 0xFF;

        /*
         * Due to a bug in accessing LRAM on the 940UA, the residual
         * is split into separate high and low 16bit quantities.
         */
        scsiq->remain_bytes =
            adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
        scsiq->remain_bytes |=
            adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

        /*
         * XXX Is this just a safeguard or will the counter really
         * have bogus upper bits?
         */
        scsiq->remain_bytes &= max_dma_count;

        return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
        ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
        if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
                return (0);
        return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
        int count;

        count = 0;
        if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
                adv_write_lram_8(adv, ADV_STOP_CODE_B,
                                 ADV_STOP_REQ_RISC_STOP);
                do {
                        if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
                                ADV_STOP_ACK_RISC_STOP) {
                                return (1);
                        }
                        DELAY(1000);
                } while (count++ < 20);
        }
        return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
        if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
                if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
                        return (1);
                }
        }
        return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented; the bounds below appear to be simple
 * watchdogs against a wedged microcode.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
        u_int8_t        host_flag;
        u_int8_t        risc_flag;
        int             loop;

        /*
         * Wait (bounded to 0x7FFF polls) for the microcode to finish
         * posting any interrupt it is currently generating.
         */
        loop = 0;
        do {
                risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
                if (loop++ > 0x7FFF) {
                        break;
                }
        } while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

        /* Tell the microcode that we are acknowledging the interrupt. */
        host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
        adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
                         host_flag | ADV_HOST_FLAG_ACK_INT);

        /*
         * Ack the interrupt at the chip level, retrying a few times
         * should it remain pending.
         */
        ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
        loop = 0;
        while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
                ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
                if (loop++ > 3) {
                        break;
                }
        }

        adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
        u_int16_t         int_halt_code;
        u_int16_t         halt_q_addr;
        target_bit_vector target_mask;
        target_bit_vector scsi_busy;
        u_int8_t          halt_qp;
        u_int8_t          target_ix;
        u_int8_t          q_cntl;
        u_int8_t          tid_no;

        if (!dumping)
                mtx_assert(&adv->lock, MA_OWNED);
        int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
        halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
        halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
        target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
        q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
        tid_no = ADV_TIX_TO_TID(target_ix);
        target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
        if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
                /*
                 * Temporarily disable the async fix by removing
                 * this target from the list of affected targets,
                 * setting our async rate, and then putting us
                 * back into the mask.
                 */
                adv->fix_asyn_xfer &= ~target_mask;
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 tid_no, /*period*/0, /*offset*/0,
                                 ADV_TRANS_ACTIVE);
                adv->fix_asyn_xfer |= target_mask;
        } else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 tid_no, /*period*/0, /*offset*/0,
                                 ADV_TRANS_ACTIVE);
        } else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
                adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
                                     target_mask, tid_no);
        } else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
                struct    adv_target_transinfo* tinfo;
                struct    adv_ccb_info *cinfo;
                union     ccb *ccb;
                u_int32_t cinfo_index;
                u_int8_t  tag_code;
                u_int8_t  q_status;

                tinfo = &adv->tinfo[tid_no];
                q_cntl |= QC_REQ_SENSE;

                /* Renegotiate if appropriate. */
                adv_set_syncrate(adv, /*struct cam_path */NULL,
                                 tid_no, /*period*/0, /*offset*/0,
                                 ADV_TRANS_CUR);
                if (tinfo->current.period != tinfo->goal.period) {
                        adv_msgout_sdtr(adv, tinfo->goal.period,
                                        tinfo->goal.offset);
                        q_cntl |= QC_MSG_OUT;
                }
                adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

                /* Don't tag request sense commands */
                tag_code = adv_read_lram_8(adv,
                                           halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
                tag_code &=
                    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

                if ((adv->fix_asyn_xfer & target_mask) != 0
                 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
                        tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
                                 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
                }
                adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
                                 tag_code);
                q_status = adv_read_lram_8(adv,
                                           halt_q_addr + ADV_SCSIQ_B_STATUS);
                q_status |= (QS_READY | QS_BUSY);
                adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
                                 q_status);
                /*
                 * Freeze the devq until we can handle the sense condition.
                 */
                cinfo_index =
                    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
                cinfo = &adv->ccb_infos[cinfo_index];
                ccb = adv->ccb_infos[cinfo_index].ccb;
                xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                ccb->ccb_h.status |= CAM_DEV_QFRZN;
                adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
                              /*ccb*/NULL, CAM_REQUEUE_REQ,
                              /*queued_only*/TRUE);
                scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
                scsi_busy &= ~target_mask;
                adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
                /*
                 * Ensure we have enough time to actually
                 * retrieve the sense.
                 */
                callout_reset(&cinfo->timer, 5 * hz, adv_timeout, ccb);
        } else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
                struct  ext_msg out_msg;

                adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                       (u_int16_t *) &out_msg,
                                       sizeof(out_msg)/2);

                if ((out_msg.msg_type == MSG_EXTENDED)
                 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
                 && (out_msg.msg_req == MSG_EXT_SDTR)) {
                        /* Revert to Async */
                        adv_set_syncrate(adv, /*struct cam_path */NULL,
                                         tid_no, /*period*/0, /*offset*/0,
                                         ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
                }
                q_cntl &= ~QC_MSG_OUT;
                adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
        } else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
                union ccb *ccb;
                u_int32_t cinfo_index;

                /* The SCSI status byte is read here but not used. */
                adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_SCSI_STATUS);
                cinfo_index =
                    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
                ccb = adv->ccb_infos[cinfo_index].ccb;
                xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
                ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
                ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
                adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
                              /*ccb*/NULL, CAM_REQUEUE_REQ,
                              /*queued_only*/TRUE);
                scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
                scsi_busy &= ~target_mask;
                adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
        } else {
                printf("Unhandled Halt Code %x\n", int_halt_code);
        }
        adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

void
adv_sdtr_to_period_offset(struct adv_softc *adv,
                          u_int8_t sync_data, u_int8_t *period,
                          u_int8_t *offset, int tid)
{
        if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
         && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
                *period = *offset = 0;
        } else {
                *period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
                *offset = sync_data & 0xF;
        }
}

void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
                 u_int tid, u_int period, u_int offset, u_int type)
{
        struct adv_target_transinfo* tinfo;
        u_int old_period;
        u_int old_offset;
        u_int8_t sdtr_data;

        mtx_assert(&adv->lock, MA_OWNED);
        tinfo = &adv->tinfo[tid];

        /* Filter our input */
        sdtr_data = adv_period_offset_to_sdtr(adv, &period,
                                              &offset, tid);

        old_period = tinfo->current.period;
        old_offset = tinfo->current.offset;

        if ((type & ADV_TRANS_CUR) != 0
         && ((old_period != period || old_offset != offset)
          || period == 0 || offset == 0) /* Changes in async fix settings */) {
                int halted;

                halted = adv_is_chip_halted(adv);
                if (halted == 0)
                        /* Must halt the chip first */
                        adv_host_req_chip_halt(adv);

                /* Update current hardware settings */
                adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

                /*
                 * If a target can run in sync mode, we don't need
                 * to check it for sync problems.
                 */
                if (offset != 0)
                        adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

                if (halted == 0)
                        /* Start the chip again */
                        adv_start_chip(adv);

                tinfo->current.period = period;
                tinfo->current.offset = offset;

                if (path != NULL) {
                        /*
                         * Tell the SCSI layer about the
                         * new transfer parameters.
                         */
                        struct  ccb_trans_settings neg;
                        struct  ccb_trans_settings_spi *spi =
                            &neg.xport_specific.spi;

                        memset(&neg, 0, sizeof (neg));
                        neg.protocol = PROTO_SCSI;
                        neg.protocol_version = SCSI_REV_2;
                        neg.transport = XPORT_SPI;
                        neg.transport_version = 2;

                        spi->sync_offset = offset;
                        spi->sync_period = period;
                        spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
                        spi->valid |= CTS_SPI_VALID_SYNC_RATE;
                        xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
                        xpt_async(AC_TRANSFER_NEG, path, &neg);
                }
        }

        if ((type & ADV_TRANS_GOAL) != 0) {
                tinfo->goal.period = period;
                tinfo->goal.offset = offset;
        }

        if ((type & ADV_TRANS_USER) != 0) {
                tinfo->user.period = period;
                tinfo->user.offset = offset;
        }
}

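/*
 * Worked example with the non-ultra table: a target requesting a 45ns
 * period at offset 8 falls between the 40ns and 50ns entries, so the
 * scan below stops at index 4 (50ns); the period is echoed back
 * unchanged per the comment in the loop, and the routine returns
 * (4 << 4) | 8 == 0x48.
 */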
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
                          u_int *offset, int tid)
{
        u_int i;
        u_int dummy_offset;
        u_int dummy_period;

        if (offset == NULL) {
                dummy_offset = 0;
                offset = &dummy_offset;
        }

        if (period == NULL) {
                dummy_period = 0;
                period = &dummy_period;
        }

        *offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
        if (*period != 0 && *offset != 0) {
                for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
                        if (*period <= adv->sdtr_period_tbl[i]) {
                                /*
                                 * When responding to a target that requests
                                 * sync, the requested rate may fall between
                                 * two rates that we can output, but still be
                                 * a rate that we can receive.  Because of this,
                                 * we want to respond to the target with
                                 * the same rate that it sent to us even
                                 * if the period we use to send data to it
                                 * is lower.  Only lower the response period
                                 * if we must.
                                 */
                                if (i == 0 /* Our maximum rate */)
                                        *period = adv->sdtr_period_tbl[0];
                                return ((i << 4) | *offset);
                        }
                }
        }

        /* Must go async */
        *period = 0;
        *offset = 0;
        if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
                return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
        return (0);
}

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
                       u_int16_t *buffer, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
                        u_int16_t *buffer, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
                 u_int16_t set_value, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        bus_set_multi_2(adv->res, adv->reg_off + ADV_LRAM_DATA,
            set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
        u_int32_t       sum;
        int             i;

        sum = 0;
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        for (i = 0; i < count; i++)
                sum += ADV_INW(adv, ADV_LRAM_DATA);
        return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
                             u_int16_t value)
{
        int     retval;

        retval = 0;
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        ADV_OUTW(adv, ADV_LRAM_DATA, value);
        DELAY(10000);
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
        if (value != ADV_INW(adv, ADV_LRAM_DATA))
                retval = 1;
        return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
        u_int16_t           val_low, val_high;

        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
        val_high = ADV_INW(adv, ADV_LRAM_DATA);
        val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
        val_low = ADV_INW(adv, ADV_LRAM_DATA);
        val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

        return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
        ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
                        u_int32_t *buffer, int count)
{
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
        u_int16_t read_wval;
        u_int8_t  cmd_reg;

        adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
        DELAY(1000);
        cmd_reg = addr | ADV_EEPROM_CMD_READ;
        adv_write_eeprom_cmd_reg(adv, cmd_reg);
        DELAY(1000);
        read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
        DELAY(1000);
        return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
        u_int16_t       read_value;

        read_value = adv_read_eeprom_16(adv, addr);
        if (read_value != value) {
                adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
                DELAY(1000);

                ADV_OUTW(adv, ADV_EEPROM_DATA, value);
                DELAY(1000);

                adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
                DELAY(20 * 1000);

                adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
                DELAY(1000);
                read_value = adv_read_eeprom_16(adv, addr);
        }
        return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
        u_int8_t read_back;
        int      retry;

        retry = 0;
        while (1) {
                ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
                DELAY(1000);
                read_back = ADV_INB(adv, ADV_EEPROM_CMD);
                if (read_back == cmd_reg) {
                        return (1);
                }
                if (retry++ > ADV_EEPROM_MAX_RETRY) {
                        return (0);
                }
        }
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
                           struct adv_eeprom_config *eeprom_config)
{
        int             n_error;
        u_int16_t       *wbuf;
        u_int16_t       sum;
        u_int8_t        s_addr;
        u_int8_t        cfg_beg;
        u_int8_t        cfg_end;

        wbuf = (u_int16_t *)eeprom_config;
        n_error = 0;
        sum = 0;
        for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
                sum += *wbuf;
                if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
                        n_error++;
                }
        }
        if (adv->type & ADV_VL) {
                cfg_beg = ADV_EEPROM_CFG_BEG_VL;
                cfg_end = ADV_EEPROM_MAX_ADDR_VL;
        } else {
                cfg_beg = ADV_EEPROM_CFG_BEG;
                cfg_end = ADV_EEPROM_MAX_ADDR;
        }

        for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
                sum += *wbuf;
                if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
                        n_error++;
                }
        }
        *wbuf = sum;
        if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
                n_error++;
        }
        wbuf = (u_int16_t *)eeprom_config;
        for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
                if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
                        n_error++;
                }
        }
        for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
                if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
                        n_error++;
                }
        }
        return (n_error);
}

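/*
 * Load the microcode image into LRAM.  The returned value is the sum
 * of the words actually written, which the caller checks against the
 * expected image checksum.  mcode_chksum, covering only the section
 * from ADV_CODE_SEC_BEG onward, is stashed in LRAM together with the
 * size, presumably for the microcode's own self-check.
 */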
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
                   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
        u_int32_t chksum;
        u_int16_t mcode_lram_size;
        u_int16_t mcode_chksum;

        mcode_lram_size = mcode_size >> 1;
        /* XXX Why zero the memory just before you write the whole thing?? */
        adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
        adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

        chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
        mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
                                                   ((mcode_size - s_addr
                                                     - ADV_CODE_SEC_BEG) >> 1));
        adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
        adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
        return (chksum);
}
1447
static void
adv_reinit_lram(struct adv_softc *adv)
{
        adv_init_lram(adv);
        adv_init_qlink_var(adv);
}

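/*
 * Carve LRAM into queue blocks and link them into a doubly linked
 * list.  Queues 1 through max_openings make up the request list; the
 * first and last blocks are written outside the loop so their links
 * can wrap (the first points back at queue max_openings, the last
 * forward to ADV_QLINK_END).  The three trailing blocks past
 * max_openings are self-linked; they serve as list heads rather than
 * request queues.
 */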
static void
adv_init_lram(struct adv_softc *adv)
{
        u_int8_t  i;
        u_int16_t s_addr;

        adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
                         (((adv->max_openings + 2 + 1) * 64) >> 1));

        i = ADV_MIN_ACTIVE_QNO;
        s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
        i++;
        s_addr += ADV_QBLK_SIZE;
        for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
        }

        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
        adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
        i++;
        s_addr += ADV_QBLK_SIZE;

        for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
                adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
        }
}

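/*
 * Prime the freshly loaded microcode: force every target to
 * asynchronous transfers, seed the queue-list variables, and tell the
 * firmware where the host's overrun buffer lives before pointing the
 * program counter at ADV_MCODE_START_ADDR.  Returns non-zero if the
 * program counter does not read back correctly.
 */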
static int
adv_init_microcode_var(struct adv_softc *adv)
{
        int      i;

        for (i = 0; i <= ADV_MAX_TID; i++) {
                /* Start out async all around */
                adv_set_syncrate(adv, /*path*/NULL,
                                 i, 0, 0,
                                 ADV_TRANS_GOAL|ADV_TRANS_CUR);
        }

        adv_init_qlink_var(adv);

        adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
        adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

        adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

        adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

        ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
        if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
                device_printf(adv->dev,
                    "Unable to set program counter. Aborting.\n");
                return (1);
        }
        return (0);
}

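/*
 * Seed the LRAM variables that describe the queue lists.  With
 * N = max_openings: the free list head starts at queue 1, the done
 * queue tail at N, and queues N + 1 and N + 2 act as the busy and
 * disconnect list heads.  The final loop zeros the 32 words of
 * scratch space at ADV_QADR_BEG.
 */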
static void
adv_init_qlink_var(struct adv_softc *adv)
{
        int       i;
        u_int16_t lram_addr;

        adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
        adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

        adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
        adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

        adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
                         (u_int8_t)((int) adv->max_openings + 1));
        adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
                         (u_int8_t)((int) adv->max_openings + 2));

        adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

        adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
        adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
        adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
        adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
        adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
        adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

        lram_addr = ADV_QADR_BEG;
        for (i = 0; i < 32; i++, lram_addr += 2)
                adv_write_lram_16(adv, lram_addr, 0);
}

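/*
 * Interrupt delivery is gated by ADV_CFG_LSW_HOST_INT_ON in the low
 * config word.  Both routines below use a read-modify-write of
 * ADV_CONFIG_LSW so the other configuration bits are preserved.
 */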
static void
adv_disable_interrupt(struct adv_softc *adv)
{
        u_int16_t cfg;

        cfg = ADV_INW(adv, ADV_CONFIG_LSW);
        ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
        u_int16_t cfg;

        cfg = ADV_INW(adv, ADV_CONFIG_LSW);
        ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
        ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
        ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
        if (adv_read_lram_8(adv, ADVV_STOP_CODE_B) != 0) {
                adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
        }
}

int
adv_stop_chip(struct adv_softc *adv)
{
        u_int8_t cc_val;

        cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
                 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
        ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
        adv_set_chip_ih(adv, ADV_INS_HALT);
        adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
        if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
                return (0);
        }
        return (1);
}

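/*
 * Ask the RISC to halt itself by posting a stop request in the LRAM
 * stop-code variable, then poll (bounded at 2000 iterations) for the
 * halted status.  The previous stop code is restored before returning;
 * the return value is non-zero if the chip actually halted.
 */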
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
        int      count;
        u_int8_t saved_stop_code;

        if (adv_is_chip_halted(adv))
                return (1);

        count = 0;
        saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
        adv_write_lram_8(adv, ADVV_STOP_CODE_B,
                         ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
        while (adv_is_chip_halted(adv) == 0
            && count++ < 2000)
                ;

        adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
        return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
        adv_set_bank(adv, 1);
        ADV_OUTW(adv, ADV_REG_IH, ins_code);
        adv_set_bank(adv, 0);
}

#if 0
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
        u_int8_t scsi_ctrl;

        adv_set_bank(adv, 1);
        scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
        adv_set_bank(adv, 0);
        return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
               u_int16_t *inbuf, int words)
{
        int     i;

        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        for (i = 0; i < words; i++, inbuf++) {
                if (i == 5) {
                        continue;
                }
                *inbuf = ADV_INW(adv, ADV_LRAM_DATA);
        }
}

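/*
 * Queue accounting: ADV_MIN_FREE_Q queues are always held in reserve.
 * For example, with max_openings = 16, cur_active = 12, and
 * ADV_MIN_FREE_Q = 2 (values chosen purely for illustration), a
 * request for n_qs = 3 fails since 12 + 2 + 3 > 16, and
 * openings_needed records the request size so it can be retried once
 * queues are freed.
 */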
static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
        u_int     cur_used_qs;
        u_int     cur_free_qs;

        cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

        if ((cur_used_qs + n_qs) <= adv->max_openings) {
                cur_free_qs = adv->max_openings - cur_used_qs;
                return (cur_free_qs);
        }
        adv->openings_needed = n_qs;
        return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
                      u_int8_t n_free_q)
{
        int i;

        for (i = 0; i < n_free_q; i++) {
                free_q_head = adv_alloc_free_queue(adv, free_q_head);
                if (free_q_head == ADV_QLINK_END)
                        break;
        }
        return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
        u_int16_t       q_addr;
        u_int8_t        next_qp;
        u_int8_t        q_status;

        next_qp = ADV_QLINK_END;
        q_addr = ADV_QNO_TO_QADDR(free_q_head);
        q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

        if ((q_status & QS_READY) == 0)
                next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

        return (next_qp);
}

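/*
 * Claim n_q_required queues from the microcode's free list and post
 * the request.  Returns 0 on success and 1 if the free list cannot
 * supply enough queues.  The sense buffer address is derived from the
 * queue number: each queue owns one scsi_sense_data slice of the
 * bus-dma'able sense area.
 */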
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                    u_int8_t n_q_required)
{
        u_int8_t        free_q_head;
        u_int8_t        next_qp;
        int             retval;

        retval = 1;
        free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
        if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
            != ADV_QLINK_END) {
                scsiq->q1.q_no = free_q_head;

                /*
                 * Now that we know our queue number, point the sense
                 * buffer at the bus-dma mapped area reserved for this
                 * queue.
                 */
                scsiq->q1.sense_addr = adv->sense_physbase
                    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
                adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
                adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
                adv->cur_active += n_q_required;
                retval = 0;
        }
        return (retval);
}

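/*
 * Distribute a scatter/gather list across chained SG queues.  The
 * head queue carries the first element (hence the entry_cnt - 1
 * below); each chained queue holds up to ADV_SG_LIST_PER_Q elements,
 * and the last queue in the chain is flagged QCSG_SG_XFER_END.  As an
 * illustration, if ADV_SG_LIST_PER_Q were 7, a 10 element list would
 * become the head element plus chained queues of 7 and 2 elements.
 */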
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                            u_int q_no)
{
        u_int8_t        sg_list_dwords;
        u_int8_t        sg_index, i;
        u_int8_t        sg_entry_cnt;
        u_int8_t        next_qp;
        u_int16_t       q_addr;
        struct          adv_sg_head *sg_head;
        struct          adv_sg_list_q scsi_sg_q;

        sg_head = scsiq->sg_head;

        if (sg_head) {
                sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
                if (sg_entry_cnt == 0)
                        panic("adv_put_ready_sg_list_queue: ScsiQ with "
                              "a SG list but only one element");
                if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
                        panic("adv_put_ready_sg_list_queue: ScsiQ with "
                              "a SG list but QC_SG_HEAD not set");
#endif
                q_addr = ADV_QNO_TO_QADDR(q_no);
                sg_index = 1;
                scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
                scsi_sg_q.sg_head_qp = q_no;
                scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
                for (i = 0; i < sg_head->queue_cnt; i++) {
                        u_int8_t segs_this_q;

                        if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
                                segs_this_q = ADV_SG_LIST_PER_Q;
                        else {
                                /*
                                 * The remaining entries fit; this is
                                 * the last SG queue in the chain.
                                 */
                                segs_this_q = sg_entry_cnt;
                                scsi_sg_q.cntl |= QCSG_SG_XFER_END;
                        }
                        scsi_sg_q.seq_no = i + 1;
                        sg_list_dwords = segs_this_q << 1;
                        if (i == 0) {
                                scsi_sg_q.sg_list_cnt = segs_this_q;
                                scsi_sg_q.sg_cur_list_cnt = segs_this_q;
                        } else {
                                scsi_sg_q.sg_list_cnt = segs_this_q - 1;
                                scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
                        }
                        next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
                        scsi_sg_q.q_no = next_qp;
                        q_addr = ADV_QNO_TO_QADDR(next_qp);

                        adv_write_lram_16_multi(adv,
                                                q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
                                                (u_int16_t *)&scsi_sg_q,
                                                sizeof(scsi_sg_q) >> 1);
                        adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
                                                (u_int32_t *)&sg_head->sg_list[sg_index],
                                                sg_list_dwords);
                        sg_entry_cnt -= segs_this_q;
                        sg_index += ADV_SG_LIST_PER_Q;
                }
        }
        adv_put_ready_queue(adv, scsiq, q_no);
}

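/*
 * Final step in posting a request: kick off sync negotiation if the
 * current and goal transfer settings disagree, copy the CDB and the
 * q1/q2 control blocks into the queue's LRAM area, and only then mark
 * the queue QS_READY so the microcode can claim it.
 */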
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
                    u_int q_no)
{
        struct          adv_target_transinfo *tinfo;
        u_int           q_addr;
        u_int           tid_no;

        tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
        tinfo = &adv->tinfo[tid_no];
        if ((tinfo->current.period != tinfo->goal.period)
         || (tinfo->current.offset != tinfo->goal.offset)) {
                adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
                scsiq->q1.cntl |= QC_MSG_OUT;
        }
        q_addr = ADV_QNO_TO_QADDR(q_no);

        scsiq->q1.status = QS_FREE;

        adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
                                (u_int16_t *)scsiq->cdbptr,
                                scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
        adv_adj_scsiq_endian(scsiq);
#endif

        adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
                      (u_int16_t *) &scsiq->q1.cntl,
                      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#ifdef CC_WRITE_IO_COUNT
        adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
                          adv->req_count);
#endif

#ifdef CC_CLEAR_DMA_REMAIN
        adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
        adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

        adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
                          (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
              u_int16_t *buffer, int words)
{
        int     i;

        /*
         * XXX This routine makes *gross* assumptions
         * about padding in the data structures.
         * Either the data structures should have explicit
         * padding members added, or they should have padding
         * turned off via compiler attributes depending on
         * which yields better overall performance.  My hunch
         * would be that turning off padding would be the
         * faster approach as an outsw is much faster than
         * this crude loop and accessing un-aligned data
         * members isn't *that* expensive.  The other choice
         * would be to modify the ASC script so that the
         * adv_scsiq_1 structure can be re-arranged so
         * padding isn't required.
         */
        ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
        for (i = 0; i < words; i++, buffer++) {
                if (i == 2 || i == 10) {
                        continue;
                }
                ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
        }
}

#if BYTE_ORDER == BIG_ENDIAN
void
adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
{

        panic("adv(4) not supported on big-endian machines.\n");
}

void
adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
{

        panic("adv(4) not supported on big-endian machines.\n");
}
#endif

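/*
 * Handle an extended message-in from a target.  An SDTR offer is
 * clamped to the period/offset the chip supports; if the offer had to
 * be modified, or the target initiated the negotiation, an SDTR
 * response is queued for message-out.  A WDTR offer is answered with
 * a zero (8-bit) width, and any other extended message is rejected
 * with MSG_MESSAGE_REJECT.
 */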
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
                     u_int8_t q_cntl, target_bit_vector target_mask,
                     int tid_no)
{
        struct  ext_msg ext_msg;

        adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
                               sizeof(ext_msg) >> 1);
        if ((ext_msg.msg_type == MSG_EXTENDED)
         && (ext_msg.msg_req == MSG_EXT_SDTR)
         && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
                union     ccb *ccb;
                struct    adv_target_transinfo *tinfo;
                u_int32_t cinfo_index;
                u_int     period;
                u_int     offset;
                int       sdtr_accept;
                u_int8_t  orig_offset;

                cinfo_index =
                    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
                ccb = adv->ccb_infos[cinfo_index].ccb;
                tinfo = &adv->tinfo[tid_no];
                sdtr_accept = TRUE;

                orig_offset = ext_msg.req_ack_offset;
                if (ext_msg.xfer_period < tinfo->goal.period) {
                        sdtr_accept = FALSE;
                        ext_msg.xfer_period = tinfo->goal.period;
                }

                /* Perform range checking */
                period = ext_msg.xfer_period;
                offset = ext_msg.req_ack_offset;
                adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
                ext_msg.xfer_period = period;
                ext_msg.req_ack_offset = offset;

                /* Record our current sync settings */
                adv_set_syncrate(adv, ccb->ccb_h.path,
                                 tid_no, ext_msg.xfer_period,
                                 ext_msg.req_ack_offset,
                                 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

                /* Offset too high or large period forced async */
                if (orig_offset != ext_msg.req_ack_offset)
                        sdtr_accept = FALSE;

                if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
                        /* Valid response to our requested negotiation */
                        q_cntl &= ~QC_MSG_OUT;
                } else {
                        /* Must Respond */
                        q_cntl |= QC_MSG_OUT;
                        adv_msgout_sdtr(adv, ext_msg.xfer_period,
                                        ext_msg.req_ack_offset);
                }
        } else if (ext_msg.msg_type == MSG_EXTENDED
                && ext_msg.msg_req == MSG_EXT_WDTR
                && ext_msg.msg_len == MSG_EXT_WDTR_LEN) {
                ext_msg.wdtr_width = 0;
                adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                        (u_int16_t *)&ext_msg,
                                        sizeof(ext_msg) >> 1);
                q_cntl |= QC_MSG_OUT;
        } else {
                ext_msg.msg_type = MSG_MESSAGE_REJECT;
                adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                        (u_int16_t *)&ext_msg,
                                        sizeof(ext_msg) >> 1);
                q_cntl |= QC_MSG_OUT;
        }
        adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
                u_int8_t sdtr_offset)
{
        struct   ext_msg sdtr_buf;

        sdtr_buf.msg_type = MSG_EXTENDED;
        sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
        sdtr_buf.msg_req = MSG_EXT_SDTR;
        sdtr_buf.xfer_period = sdtr_period;
        sdtr_offset &= ADV_SYN_MAX_OFFSET;
        sdtr_buf.req_ack_offset = sdtr_offset;
        adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
                                (u_int16_t *) &sdtr_buf,
                                sizeof(sdtr_buf) / 2);
}

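/*
 * The routine above builds an SDTR message in the microcode's
 * message-out buffer.  The offset is masked with ADV_SYN_MAX_OFFSET
 * first, so the chip never advertises more REQ/ACK offset than it can
 * handle.
 */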
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
              u_int32_t status, int queued_only)
{
        u_int16_t q_addr;
        u_int8_t  q_no;
        struct adv_q_done_info scsiq_buf;
        struct adv_q_done_info *scsiq;
        u_int8_t  target_ix;
        int       count;

        if (!dumping)
                mtx_assert(&adv->lock, MA_OWNED);
        scsiq = &scsiq_buf;
        target_ix = ADV_TIDLUN_TO_IX(target, lun);
        count = 0;
        for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
                struct adv_ccb_info *ccb_info;

                q_addr = ADV_QNO_TO_QADDR(q_no);
                adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
                ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
                if (((scsiq->q_status & QS_READY) != 0)
                 && ((scsiq->q_status & QS_ABORTED) == 0)
                 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
                 && (scsiq->d2.target_ix == target_ix)
                 && (queued_only == 0
                  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
                 && (ccb == NULL || (ccb == ccb_info->ccb))) {
                        union ccb *aborted_ccb;
                        struct adv_ccb_info *cinfo;

                        scsiq->q_status |= QS_ABORTED;
                        adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
                                         scsiq->q_status);
                        aborted_ccb = ccb_info->ccb;
                        /* Don't clobber earlier error codes */
                        if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
                          == CAM_REQ_INPROG)
                                aborted_ccb->ccb_h.status |= status;
                        cinfo = (struct adv_ccb_info *)
                            aborted_ccb->ccb_h.ccb_cinfo_ptr;
                        cinfo->state |= ACCB_ABORT_QUEUED;
                        count++;
                }
        }
        return (count);
}

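/*
 * Reset the chip (and optionally the SCSI bus), rebuild the LRAM
 * queue state, drop every target back to async, and complete all
 * pending CCBs with CAM_SCSI_BUS_RESET.  Returns the number of CCBs
 * terminated.
 */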
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
        int count;
        int i;
        union ccb *ccb;

        if (!dumping)
                mtx_assert(&adv->lock, MA_OWNED);
        i = 200;
        while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
            && i--)
                DELAY(1000);
        adv_reset_chip(adv, initiate_bus_reset);
        adv_reinit_lram(adv);
        for (i = 0; i <= ADV_MAX_TID; i++)
                adv_set_syncrate(adv, NULL, i, /*period*/0,
                                 /*offset*/0, ADV_TRANS_CUR);
        ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

        /* Tell the XPT layer that a bus reset occurred */
        if (adv->path != NULL)
                xpt_async(AC_BUS_RESET, adv->path, NULL);

        count = 0;
        while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
                if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
                        ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
                adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
                count++;
        }

        adv_start_chip(adv);
        return (count);
}

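/*
 * Program the synchronous transfer register for a single target.  On
 * register bank 1, ADV_HOST_SCSIID selects which target's SDTR slot
 * is visible, so the routine saves the current ID, selects the
 * target, writes sdtr_data through bank 0's ADV_SYN_OFFSET register,
 * and then restores the original ID.
 */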
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
        int orig_id;

        adv_set_bank(adv, 1);
        orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
        ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
        if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
                adv_set_bank(adv, 0);
                ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
        }
        adv_set_bank(adv, 1);
        ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
        adv_set_bank(adv, 0);
}