/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm_param.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
			"TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
			T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		{
			T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			/*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};
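/*
 * cam_quirkmatch() returns the first entry whose inquiry pattern
 * matches the device, so the more specific entries above must precede
 * the catch-all T_ANY entry, and that wildcard guarantees every device
 * matches something (advasync() panics if no entry matches).
 */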
/*
 * Allowable periods, in SCSI SDTR period factor (4ns) units.
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25, 30, 35, 40, 50, 60, 70, 85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12, 19, 25, 32, 38, 44, 50, 57,
	63, 69, 75, 82, 88, 94, 100, 107
};
/*
 * Structure used to build the extended SCSI messages
 * exchanged with targets during negotiation.
 */
struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp_b3;
			u_int8_t mdp_b2;
			u_int8_t mdp_b1;
			u_int8_t mdp_b0;
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
#define	mdp_b3		u_ext_msg.mdp.mdp_b3
#define	mdp_b2		u_ext_msg.mdp.mdp_b2
#define	mdp_b1		u_ext_msg.mdp.mdp_b1
#define	mdp_b0		u_ext_msg.mdp.mdp_b0

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB	0x41
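/*
 * Sync data bytes pack a period-table index in the high nibble and a
 * REQ/ACK offset in the low nibble (see adv_sdtr_to_period_offset()),
 * so 0x41 selects period table entry 4 with an offset of 1 rather
 * than a true async setting.
 */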
/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);

static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
static u_int8_t	 adv_get_chip_scsi_ctrl(struct adv_softc *adv);

/* Queue handling and execution */
static __inline int
		 adv_sgcount_to_qcount(int sgcount);

static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}
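/*
 * Worked example (with an illustrative ADV_SG_LIST_PER_Q of 7): a
 * 15-element S/G list leaves 14 elements after the head queue, so
 * (15 - 1) / 7 = 2 S/G continuation queues with no remainder, for a
 * total of 3 queues once the head queue is counted.
 */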
#if BYTE_ORDER == BIG_ENDIAN
static void	 adv_adj_endian_qdone_info(struct adv_q_done_info *);
static void	 adv_adj_scsiq_endian(struct adv_scsi_q *);
#endif
static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t	 adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t	 adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);
/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit for it.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0, /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
		break;
	}
	default:
		break;
	}
}
void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
		     | ADV_CC_DIAG | ADV_CC_SCSI_RESET
		     | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}
u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}

u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config  *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}
int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}
int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}
u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if ((irq_no < ADV_MIN_IRQ_NO)
		 || (irq_no > ADV_MAX_IRQ_NO)) {
			irq_no = 0;
		} else {
			irq_no -= ADV_MIN_IRQ_NO - 1;
		}

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;

	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	retval = 1;	/* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}

		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;

		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
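/*
 * A non-zero return from adv_execute_scsi_queue() means the request was
 * not posted; when adv_get_num_free_queues() finds too few free
 * micro-queues it records the shortfall in adv->openings_needed, giving
 * the caller a hint of when a retry can succeed.
 */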
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
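/*
 * The count returned above comes from the high byte of the
 * ADV_SCSIQ_B_CNTL word and is the number of S/G continuation queues
 * chained to the completed head queue, so the caller can release every
 * micro-queue the request consumed, not just the head.
 */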
void
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		printf("adv%d: Unable to start chip\n", adv->unit);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}
int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}
/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg) >> 1);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t  scsi_status;
		union	  ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int   old_period;
	u_int   old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int halted;

		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	*period = 0;
	*offset = 0;

	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}
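/*
 * Example: a target requesting the period stored in
 * adv->sdtr_period_tbl[2] with an offset of 8 is answered with the
 * SDTR data byte (2 << 4) | 8 == 0x28.  adv_sdtr_to_period_offset()
 * reverses the encoding by indexing the period table with the high
 * nibble and masking the offset out of the low nibble.
 */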
/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
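/*
 * Note that the multi-word LRAM accessors write ADV_LRAM_ADDR only
 * once per burst; the code relies on the chip auto-incrementing its
 * LRAM address latch after each 16-bit access to the data port.
 */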
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}
static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = -1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
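/*
 * The 32-bit sum over the freshly written image is returned so the
 * caller (adv_init_lram_and_mcode()) can compare it against the
 * precomputed adv_mcode_chksum and catch a corrupted download.  The
 * 16-bit section checksum stored at ADVV_MCODE_CHKSUM_W is presumably
 * used by the running microcode to re-verify its own code section.
 */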
static void
adv_reinit_lram(struct adv_softc *adv) {
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
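/*
 * Each 64-byte queue block begins with forward (FWD) and backward
 * (BWD) queue number links plus the block's own number (QNO), so the
 * loops above thread queues 1 through max_openings into a doubly
 * linked free list.  The blocks past max_openings, used for the busy
 * and disconnect queue heads set up in adv_init_qlink_var(), are
 * simply linked to themselves.
 */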
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}
static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int cur_used_qs;
	u_int cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head != NULL) {
		sg_entry_cnt = sg_head->entry_cnt - 1;

		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");

		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct adv_target_transinfo* tinfo;
	u_int  q_addr;
	u_int  tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

#if BYTE_ORDER == BIG_ENDIAN
static void
adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
{
	panic("adv(4) not supported on big-endian machines.\n");
}

static void
adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
{
	panic("adv(4) not supported on big-endian machines.\n");
}
#endif
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		int	  sdtr_accept;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		union	  ccb *ccb;
		u_int	  period;
		u_int	  offset;
		u_int8_t  orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Respond with the negotiated values */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}
	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}
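/*
 * On the wire this is the standard five byte SDTR sequence:
 * 0x01 (MSG_EXTENDED), 0x03 (MSG_EXT_SDTR_LEN), 0x01 (MSG_EXT_SDTR),
 * followed by the transfer period factor and the REQ/ACK offset.
 */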
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int	  max_wait;
	int	  i;
	union ccb *ccb;

	/*
	 * Wait for the SCSI Bus Reset to complete.
	 */
	max_wait = 1000;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && (max_wait-- > 0))
		DELAY(100);

	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
	}

	adv_start_chip(adv);
	return (0);
}
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}