/*-
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm_param.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02

static struct adv_quirk_entry adv_quirk_table[] =
	{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
	ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
	T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
	"TANDBERG", " TDC 36", "*"
	{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
	T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
	T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
	/* Default quirk entry */
	T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
	/*vendor*/"*", /*product*/"*", /*revision*/"*"
	ADV_QUIRK_FIX_ASYN_XFER,
/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =
static u_int8_t adv_sdtr_period_tbl_ultra[] =
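/*
 * The index of the entry chosen from one of these tables becomes the high
 * nibble of the SDTR data byte programmed into the chip, and the REQ/ACK
 * offset the low nibble; see adv_period_offset_to_sdtr() and
 * adv_sdtr_to_period_offset() below.
 */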
	u_int8_t sdtr_xfer_period;
	u_int8_t sdtr_req_ack_offset;
#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
#define	mdp_b3		u_ext_msg.mdp_b3
#define	mdp_b2		u_ext_msg.mdp_b2
#define	mdp_b1		u_ext_msg.mdp_b1
#define	mdp_b0		u_ext_msg.mdp_b0

/*
 * Some of the early PCI adapters have problems with
 * async transfers. Instead, use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
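/*
 * 0x41 packs period-table index 4 into the high nibble and a REQ/ACK
 * offset of 1 into the low nibble, the same layout decoded by
 * adv_sdtr_to_period_offset().
 */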
static void	adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
				       u_int16_t *buffer, int count);
static void	adv_write_lram_16_multi(struct adv_softc *adv,
					u_int16_t s_addr, u_int16_t *buffer,
					int count);
static void	adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				 u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);
static int	adv_write_and_verify_lram_16(struct adv_softc *adv,
					     u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
static void	adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				  u_int32_t value);
static void	adv_write_lram_32_multi(struct adv_softc *adv,
					u_int16_t s_addr, u_int32_t *buffer,
					int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					 u_int8_t cmd_reg);
static int	adv_set_eeprom_config_once(struct adv_softc *adv,
					   struct adv_eeprom_config *eeconfig);

static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);
static void	adv_reinit_lram(struct adv_softc *adv);
static void	adv_init_lram(struct adv_softc *adv);
static int	adv_init_microcode_var(struct adv_softc *adv);
static void	adv_init_qlink_var(struct adv_softc *adv);

static void	adv_disable_interrupt(struct adv_softc *adv);
static void	adv_enable_interrupt(struct adv_softc *adv);
static void	adv_toggle_irq_act(struct adv_softc *adv);

static int	adv_host_req_chip_halt(struct adv_softc *adv);
static void	adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
static u_int8_t	adv_get_chip_scsi_ctrl(struct adv_softc *adv);

/* Queue handling and execution */
static __inline int adv_sgcount_to_qcount(int sgcount);
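/*
 * The head queue carries the first SG segment and each additional queue
 * holds up to ADV_SG_LIST_PER_Q more, so the total queue count is
 * (sgcount - 1) / ADV_SG_LIST_PER_Q, rounded up, plus the head queue.
 */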
static __inline int
adv_sgcount_to_qcount(int sgcount)
	int n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);

#if BYTE_ORDER == BIG_ENDIAN
static void	adv_adj_endian_qdone_info(struct adv_q_done_info *);
static void	adv_adj_scsiq_endian(struct adv_scsi_q *);
#endif
static void	adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *inbuf, int words);
static u_int	adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t adv_alloc_free_queues(struct adv_softc *adv,
				      u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t adv_alloc_free_queue(struct adv_softc *adv,
				     u_int8_t free_q_head);
static int	adv_send_scsi_queue(struct adv_softc *adv,
				    struct adv_scsi_q *scsiq,
				    u_int8_t n_q_required);
static void	adv_put_ready_sg_list_queue(struct adv_softc *adv,
					    struct adv_scsi_q *scsiq,
					    u_int q_no);
static void	adv_put_ready_queue(struct adv_softc *adv,
				    struct adv_scsi_q *scsiq, u_int q_no);
static void	adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			      u_int16_t *buffer, int words);

static void	adv_handle_extmsg_in(struct adv_softc *adv,
				     u_int16_t halt_q_addr, u_int8_t q_cntl,
				     target_bit_vector target_id,
				     int tid_no);
static void	adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				u_int8_t sdtr_offset);
static void	adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
				       u_int8_t sdtr_data);
/* Exported functions first */

advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	mtx_assert(&adv->lock, MA_OWNED);
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");
		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS) != 0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk. But, if the quirk exonerates a device, clear
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
			/*
			 * Revert to async transfers
			 * for the next device.
			 */
			adv_set_syncrate(adv, /*path*/NULL,
					 xpt_path_target_id(path),
					 ADV_TRANS_GOAL|ADV_TRANS_CUR);
		}
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
		     | ADV_CC_DIAG | ADV_CC_SCSI_RESET
		     | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);

adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
	u_int8_t byte_data;
	u_int16_t word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);

adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);

adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));

adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures. Return 1 if a board is
 * found, 0 otherwise.
 */
adv_find_signature(struct resource *res)
	u_int16_t signature;

	if (bus_read_1(res, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_read_2(res, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
adv_lib_init(struct adv_softc *adv)
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}

adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config *eeprom_config)
	wbuf = (u_int16_t *)eeprom_config;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
#ifdef ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
	*wbuf = adv_read_eeprom_16(adv, s_addr);

adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
	if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
	if (++retry > ADV_EEPROM_MAX_RETRY) {
	return (retry > ADV_EEPROM_MAX_RETRY);

adv_reset_chip(struct adv_softc *adv, int reset_bus)
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
adv_test_external_lram(struct adv_softc* adv)
	u_int16_t q_addr;
	u_int16_t saved_value;
	int	  success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);

adv_init_lram_and_mcode(struct adv_softc *adv)
	u_int32_t retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		device_printf(adv->dev,
			      "Microcode download failed checksum!\n");
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);

adv_get_chip_irq(struct adv_softc *adv)
	u_int16_t cfg_lsw;
	u_int8_t  chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	return (chip_irq + ADV_MIN_IRQ_NO);

adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
	u_int16_t cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if ((irq_no < ADV_MIN_IRQ_NO)
		 || (irq_no > ADV_MAX_IRQ_NO)) {
		}
		irq_no -= ADV_MIN_IRQ_NO - 1;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));

adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
	struct adv_target_transinfo* tinfo;
	u_int32_t *p_data_addr;
	u_int32_t *p_data_bcount;
	int	   disable_syn_offset_one_fix;
	u_int8_t   sg_entry_cnt;
	u_int8_t   sg_entry_cnt_minus_one;

	mtx_assert(&adv->lock, MA_OWNED);
	retval = 1;			/* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}

		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
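		/*
		 * Note that queue_cnt above counts only the additional SG
		 * queues; the head queue holding the first segment is not
		 * included.
		 */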
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {
			disable_syn_offset_one_fix = TRUE;
		if (scsiq->cdbptr[0] == INQUIRY
		 || scsiq->cdbptr[0] == REQUEST_SENSE
		 || scsiq->cdbptr[0] == READ_CAPACITY
		 || scsiq->cdbptr[0] == MODE_SELECT_6
		 || scsiq->cdbptr[0] == MODE_SENSE_6
		 || scsiq->cdbptr[0] == MODE_SENSE_10
		 || scsiq->cdbptr[0] == MODE_SELECT_10
		 || scsiq->cdbptr[0] == READ_TOC) {
			disable_syn_offset_one_fix = TRUE;
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				       | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)&scsiq->d2,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
adv_start_chip(struct adv_softc *adv)
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);

adv_stop_execution(struct adv_softc *adv)
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
			    ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
		} while (count++ < 20);
	}
	return (0);

adv_is_chip_halted(struct adv_softc *adv)
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
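/*
 * The 0x7FFF iteration bound in the first loop below is simply a spin
 * limit while waiting for the microcode to clear ADV_RISC_FLAG_GEN_INT;
 * per the XXX above, it is otherwise undocumented.
 */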
adv_ack_interrupt(struct adv_softc *adv)
	u_int8_t host_flag;
	u_int8_t risc_flag;
	int	 loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3)
			break;
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
adv_isr_chip_halted(struct adv_softc *adv)
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;

	mtx_assert(&adv->lock, MA_OWNED);
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct adv_target_transinfo* tinfo;
		struct adv_ccb_info *cinfo;
		union ccb *ccb;
		u_int32_t cinfo_index;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				     | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		cinfo = &adv->ccb_infos[cinfo_index];
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
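		/*
		 * Clearing the target's busy bit below lets the microcode
		 * start the request sense we just marked QS_READY ahead of
		 * the commands requeued above.
		 */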
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		callout_reset(&cinfo->timer, 5 * hz, adv_timeout, ccb);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg) >> 1);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t  scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}

adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
	struct adv_target_transinfo* tinfo;
	u_int	 old_period;
	u_int	 old_offset;
	u_int8_t sdtr_data;

	mtx_assert(&adv->lock, MA_OWNED);
	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int halted;

		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct ccb_trans_settings neg;
			memset(&neg, 0, sizeof (neg));
			struct ccb_trans_settings_spi *spi =
			    &neg.xport_specific.spi;

			neg.protocol = PROTO_SCSI;
			neg.protocol_version = SCSI_REV_2;
			neg.transport = XPORT_SPI;
			neg.transport_version = 2;

			spi->sync_offset = offset;
			spi->sync_period = period;
			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive. Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower. Only lower the response period
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
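/*
 * Example of the encoding above: if the first two table entries were,
 * say, 25 and 30 (the actual values depend on the chip type), a target
 * requesting a period of 28 matches index 1 and is answered with
 * ((1 << 4) | offset) while the period it sent is preserved; a request
 * of 20, faster than entry 0, is first rounded up to 25.
 */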
/* Internal Routines */

adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);

adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);

adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_set_multi_2(adv->res, adv->reg_off + ADV_LRAM_DATA,
			set_value, count);

adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
	u_int32_t sum;
	int	  i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);

adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		return (-1);
	return (0);

adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
	u_int16_t val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);

adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif

adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);

	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);

	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	return (read_wval);

adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
	u_int16_t read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);

		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);

adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
	while (1) {
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}

adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
	wbuf = (u_int16_t *)eeprom_config;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			return (-1);
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			return (-1);
		}
	}

	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		return (-1);
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			return (-1);
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			return (-1);
		}
	}
	return (0);
1421 if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
1429 adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
1430 u_int16_t *mcode_buf, u_int16_t mcode_size)
1433 u_int16_t mcode_lram_size;
1434 u_int16_t mcode_chksum;
1436 mcode_lram_size = mcode_size >> 1;
1437 /* XXX Why zero the memory just before you write the whole thing?? */
1438 adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
1439 adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);
1441 chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
1442 mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
1443 ((mcode_size - s_addr
1444 - ADV_CODE_SEC_BEG) >> 1));
1445 adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
1446 adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
1451 adv_reinit_lram(struct adv_softc *adv) {
1453 adv_init_qlink_var(adv);
1457 adv_init_lram(struct adv_softc *adv)
1462 adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
1463 (((adv->max_openings + 2 + 1) * 64) >> 1));
1465 i = ADV_MIN_ACTIVE_QNO;
1466 s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;
1468 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
1469 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
1470 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1472 s_addr += ADV_QBLK_SIZE;
1473 for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
1474 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
1475 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
1476 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1479 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
1480 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
1481 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
1483 s_addr += ADV_QBLK_SIZE;
1485 for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
1486 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
1487 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
1488 adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
adv_init_microcode_var(struct adv_softc *adv)
	int i;

	for (i = 0; i <= ADV_MAX_TID; i++) {
		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		device_printf(adv->dev,
			      "Unable to set program counter. Aborting.\n");
		return (1);
	}
	return (0);

adv_init_qlink_var(struct adv_softc *adv)
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int)adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int)adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
adv_disable_interrupt(struct adv_softc *adv)
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);

adv_enable_interrupt(struct adv_softc *adv)
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);

adv_toggle_irq_act(struct adv_softc *adv)
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);

adv_start_execution(struct adv_softc *adv)
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}

adv_stop_chip(struct adv_softc *adv)
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);

adv_host_req_chip_halt(struct adv_softc *adv)
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
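	/*
	 * Bounded spin while the RISC acts on the halt request; the
	 * return value below reports whether it halted before the count
	 * limit expired.
	 */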
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);

adv_get_chip_scsi_ctrl(struct adv_softc *adv)
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);

/*
 * XXX Looks like more padding issues in this routine as well.
 * There has to be a way to turn this into an insw.
 */
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
	int i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}

adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
	u_int cur_used_qs;
	u_int cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);

adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
	u_int8_t  next_qp;
	u_int8_t  q_status;
	u_int16_t q_addr;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);

adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
	u_int8_t free_q_head;
	u_int8_t next_qp;
	u_int8_t tid_no;
	u_int8_t target_ix;
	int	 retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
	u_int8_t  sg_list_dwords;
	u_int8_t  sg_index, i;
	u_int8_t  sg_entry_cnt;
	u_int8_t  next_qp;
	u_int16_t q_addr;
	struct adv_sg_head *sg_head;
	struct adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;

		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");

		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");

		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
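		/*
		 * Walk the chain of queues linked from the head queue,
		 * copying up to ADV_SG_LIST_PER_Q SG entries into each;
		 * the queue receiving the final segment is tagged with
		 * QCSG_SG_XFER_END.
		 */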
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);

adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
	struct adv_target_transinfo* tinfo;
	u_int q_addr;
	u_int tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {
		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#ifdef CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
#endif

#ifdef CC_CLEAR_DMA_REMAIN
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
	int i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance. My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive. The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
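	/*
	 * Words 2 and 10 are skipped below; they appear to be the
	 * structure padding referred to above rather than real queue
	 * fields.
	 */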
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}

#if BYTE_ORDER == BIG_ENDIAN
adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
	panic("adv(4) not supported on big-endian machines.\n");

adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
	panic("adv(4) not supported on big-endian machines.\n");
#endif
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
	struct ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		int	  sdtr_accept;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		union ccb *ccb;
		u_int8_t  orig_offset;
		u_int	  period;
		u_int	  offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}
	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
	struct ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;

	mtx_assert(&adv->lock, MA_OWNED);
	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);

	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;

		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			    == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
		}
	}
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
	int	   i;
	union ccb *ccb;

	mtx_assert(&adv->lock, MA_OWNED);

	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0)
		;
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
	}

	adv_start_chip(adv);
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);