/*-
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm_param.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
			"TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
			T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		{
			T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			/*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};
/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =

static u_int8_t adv_sdtr_period_tbl_ultra[] =

		u_int8_t sdtr_xfer_period;
		u_int8_t sdtr_req_ack_offset;

#define xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define wdtr_width	u_ext_msg.wdtr.wdtr_width
#define mdp_b3		u_ext_msg.mdp_b3
#define mdp_b2		u_ext_msg.mdp_b2
#define mdp_b1		u_ext_msg.mdp_b1
#define mdp_b0		u_ext_msg.mdp_b0

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);

static u_int8_t	 adv_get_chip_scsi_ctrl(struct adv_softc *adv);

/* Queue handling and execution */
static __inline int
adv_sgcount_to_qcount(int sgcount);

static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}
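/*
 * Rough sketch of the math above: a request consumes one "head" queue plus
 * ceil((sgcount - 1) / ADV_SG_LIST_PER_Q) SG-list queues for the remaining
 * scatter/gather elements.  For example, assuming ADV_SG_LIST_PER_Q were 7
 * (the value is defined elsewhere and not shown here), a 15-element SG list
 * would need 1 + ceil(14 / 7) = 3 queues.
 */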
#if BYTE_ORDER == BIG_ENDIAN
static void	 adv_adj_endian_qdone_info(struct adv_q_done_info *);
static void	 adv_adj_scsiq_endian(struct adv_scsi_q *);
#endif
static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t	 adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t	 adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      u_int8_t tid_no);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);
/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);
		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit for it.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
		break;
	}
	default:
		break;
	}
}
void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
		     | ADV_CC_DIAG | ADV_CC_SCSI_RESET
		     | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}
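/*
 * Note (inferred from the callers in this file): bank 0 exposes the normal
 * runtime registers, bank 1 exposes the alternate set used below for
 * ADV_REG_IH, ADV_REG_SC and ADV_HOST_SCSIID, and bank 2 (DIAG plus
 * BANK_ONE) presumably selects a diagnostic register set.  Every caller in
 * this file switches back to bank 0 when it is done.
 */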
u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}
u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
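/*
 * Access pattern used throughout this file (summary inferred from the
 * routines above and the *_multi variants below): the host writes the
 * target LRAM word address to the ADV_LRAM_ADDR port, then reads or writes
 * 16-bit data through the ADV_LRAM_DATA port.  The data port appears to
 * auto-increment the address, which is what lets the multi-word helpers
 * issue a single address write followed by a string of data-port accesses.
 */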
/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}
int
adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#ifdef ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);

	return (sum == *wbuf);
}
int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}
int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}
int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}
u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	return (chip_irq + ADV_MIN_IRQ_NO);
}
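/*
 * Summary of the encoding used by the two routines above and below: the IRQ
 * selection lives in the chip's CONFIG LSW register.  On VL adapters it is a
 * 3-bit field at bits 4:2 biased by ADV_MIN_IRQ_NO - 1; on ISA adapters only
 * a 2-bit field at bits 3:2 is used, biased by ADV_MIN_IRQ_NO.
 * adv_set_chip_irq() performs the inverse encoding before writing the field
 * back (note the 0xFFE3/0xFFE0 versus 0xFFF3 masks).
 */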
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if ((irq_no < ADV_MIN_IRQ_NO)
		 || (irq_no > ADV_MAX_IRQ_NO)) {
			irq_no = 0;
		} else {
			irq_no -= ADV_MIN_IRQ_NO - 1;
		}

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;

		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {

		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;
	int		i;

	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

			disable_syn_offset_one_fix = TRUE;

		if (scsiq->cdbptr[0] == INQUIRY
		 || scsiq->cdbptr[0] == REQUEST_SENSE
		 || scsiq->cdbptr[0] == READ_CAPACITY
		 || scsiq->cdbptr[0] == MODE_SELECT_6
		 || scsiq->cdbptr[0] == MODE_SENSE_6
		 || scsiq->cdbptr[0] == MODE_SENSE_10
		 || scsiq->cdbptr[0] == MODE_SELECT_10
		 || scsiq->cdbptr[0] == READ_TOC) {
			disable_syn_offset_one_fix = TRUE;
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)&scsiq->d2,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}
int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
		} while (count++ < 20);
	}
	return (0);
}
int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}
/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
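/*
 * In rough terms (derived from the statements above): the routine waits for
 * the microcode to drop ADV_RISC_FLAG_GEN_INT, sets ADV_HOST_FLAG_ACK_INT in
 * the host flag byte so the microcode can see the interrupt was observed,
 * writes ADV_CIW_INT_ACK to the status register until ADV_CSW_INT_PENDING
 * clears, and finally restores the original host flag value.  The loop
 * counter is simply a bail-out against a wedged chip.
 */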
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	adv_target_transinfo* tinfo;
		union	ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg) >> 1);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t  scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int	 old_period;
	u_int	 old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int halted;

		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct ccb_trans_settings neg;
			memset(&neg, 0, sizeof (neg));
			struct ccb_trans_settings_spi *spi =
			    &neg.xport_specific.spi;

			neg.protocol = PROTO_SCSI;
			neg.protocol_version = SCSI_REV_2;
			neg.transport = XPORT_SPI;
			neg.transport_version = 2;

			spi->sync_offset = offset;
			spi->sync_period = period;
			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}
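/*
 * The SDTR byte produced above packs the period-table index into the high
 * nibble and the REQ/ACK offset into the low nibble; adv_sdtr_to_period_offset()
 * reverses the encoding.  Illustrative example using the names above: if the
 * requested period matches entry 2 of adv->sdtr_period_tbl and the offset is
 * 8, the register value is (2 << 4) | 8 == 0x28.
 */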
/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}
static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		return (-1);

	return (0);
}
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);

	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);

	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);

	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);

		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}
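/*
 * Write path in brief (as implemented above): the word is only written when
 * it differs from what is already stored; the sequence is WRITE_ENABLE, the
 * data word, a WRITE command for the target address, WRITE_DISABLE, and a
 * read-back.  Callers compare the returned (re-read) value with the value
 * they asked to write to detect a failed update, which is how
 * adv_set_eeprom_config_once() below uses this routine.
 */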
static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		retval;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	retval = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			retval = -1;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			retval = -1;
		}
	}

	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		retval = -1;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			retval = -1;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			retval = -1;
		}
	}
	return (retval);
}
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
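/*
 * The value handed back is the 16-bit sum of everything just written, which
 * adv_init_lram_and_mcode() compares against the precomputed adv_mcode_chksum
 * to verify the download.  A second, code-section checksum (mcode_chksum) and
 * the image size are also stored in LRAM, presumably so the running microcode
 * can cross-check them.
 */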
static void
adv_reinit_lram(struct adv_softc *adv) {
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
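/*
 * Layout sketch (inferred from the loops above): LRAM starting at
 * ADV_QADR_BEG is carved into ADV_QBLK_SIZE-byte queue blocks.  Blocks
 * 1..max_openings are linked into a doubly linked list through their
 * B_FWD/B_BWD bytes; block 1 points back at block max_openings and the last
 * block's forward link is ADV_QLINK_END.  The few blocks past max_openings
 * are left linked to themselves, apparently reserved for the microcode's
 * internal use.
 */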
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int)adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int)adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}
int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}
static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int cur_used_qs;
	u_int cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}
static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head != NULL) {
		sg_entry_cnt = sg_head->entry_cnt - 1;

		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");

		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
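/*
 * Big picture for the routine above (informal summary): the head queue
 * describes the first SG element, and the remaining entry_cnt - 1 elements
 * are copied in ADV_SG_LIST_PER_Q-sized groups into the successive queue
 * blocks found through the ADV_SCSIQ_B_FWD links.  Each group carries a
 * sequence number, and the final one is marked with QCSG_SG_XFER_END so the
 * microcode knows where the list stops.
 */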
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct adv_target_transinfo* tinfo;
	u_int	q_addr;
	u_int	tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#ifdef CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
#endif

#ifdef CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
#if BYTE_ORDER == BIG_ENDIAN
static void
adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
{
	panic("adv(4) not supported on big-endian machines.\n");
}

static void
adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
{
	panic("adv(4) not supported on big-endian machines.\n");
}
#endif
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     u_int8_t tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		int	  sdtr_accept;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		union	  ccb *ccb;
		u_int	  period;
		u_int	  offset;
		u_int8_t  orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}
	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;

	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			 == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
void
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int	  i;
	union ccb *ccb;

	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0

	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
	}

	adv_start_chip(adv);
}
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}