 * Generic driver for the Advanced Systems Inc. SCSI controllers
 * Product specific probe and attach routines can be found in:
 * i386/isa/adv_isa.c	ABP5140, ABP542, ABP5150, ABP842, ABP852
 * i386/eisa/adv_eisa.c	ABP742, ABP752
 * pci/adv_pci.c	ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
 *			ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
 * Copyright (c) 1996-2000 Justin Gibbs.
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 * Copyright (c) 1995-1997 Advanced System Products, Inc.
 * All Rights Reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mutex.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm_param.h>

#include <dev/advansys/advansys.h>
static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_intr_locked(struct adv_softc *adv);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
				       struct adv_ccb_info *cinfo);
static __inline void adv_set_state(struct adv_softc *adv, adv_state state);
static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb);
static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb);
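
/*
 * CCB info structures track the per-transaction state this driver keeps
 * outside of the CCB itself (busdma map, timeout callout, state flags).
 * Free structures live on the free_ccb_infos SLIST and are replenished
 * on demand from the preallocated ccb_infos array.
 */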
static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
	struct adv_ccb_info *cinfo;

	mtx_assert(&adv->lock, MA_OWNED);
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
		cinfo = adv_alloc_ccb_info(adv);

adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
	mtx_assert(&adv->lock, MA_OWNED);
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);

adv_set_state(struct adv_softc *adv, adv_state state)
	xpt_freeze_simq(adv->sim, /*count*/1);

adv_clear_state(struct adv_softc *adv, union ccb* ccb)
	adv_clear_state_really(adv, ccb);

adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
	mtx_assert(&adv->lock, MA_OWNED);
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;

	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
			ccb_h = LIST_FIRST(&adv->pending_ccbs);
			while (ccb_h != NULL) {
				cinfo = ccb_h->ccb_cinfo_ptr;
				callout_reset(&cinfo->timer,
				    ccb_h->timeout * hz / 1000, adv_timeout,
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);

			adv->state &= ~ADV_IN_TIMEOUT;
			device_printf(adv->dev, "No longer in timeout\n");

	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
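
/*
 * Callback for bus_dmamap_load() on single-segment control structures:
 * it simply records the bus address of the mapped segment through the
 * caller-supplied pointer.
 */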
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	bus_addr_t* physaddr;

	physaddr = (bus_addr_t*)arg;
	*physaddr = segs->ds_addr;
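
/*
 * adv_action() is the CAM SIM action entry point.  It dispatches on the
 * CCB function code: SCSI I/O is mapped for DMA and handed to
 * adv_execute_ccb(), transfer-setting, geometry, reset and path-inquiry
 * requests are serviced directly, and unsupported function codes are
 * failed with CAM_REQ_INVALID.
 */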
adv_action(struct cam_sim *sim, union ccb *ccb)
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);
	mtx_assert(&adv->lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		struct ccb_hdr *ccb_h;
		struct ccb_scsiio *csio;
		struct adv_ccb_info *cinfo;

		cinfo = adv_get_ccb_info(adv);
			panic("XXX Handle CCB info error!!!");
		ccb_h->ccb_cinfo_ptr = cinfo;

		error = bus_dmamap_load_ccb(adv->buffer_dmat,
		if (error == EINPROGRESS) {
			 * So as to maintain ordering, freeze the controller
			 * queue until our mapping is returned.
			adv_set_state(adv, ADV_BUSDMA_BLOCK);
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_ABORT:			/* Abort the specified CCB */
		ccb->ccb_h.status = CAM_REQ_INVALID;
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		target_bit_vector targ_mask;
		struct adv_transinfo *tconf;

		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		 * The user must specify which type of settings he wishes
		if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			update_type |= ADV_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			update_type |= ADV_TRANS_USER;
			ccb->ccb_h.status = CAM_REQ_INVALID;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
					adv->cmd_qng_enabled &= ~targ_mask;

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
					adv->user_disc_enable &= ~targ_mask;

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
					adv->user_cmd_qng_enabled &= ~targ_mask;

		 * If the user specifies either the sync rate, or offset,
		 * but not both, the unspecified parameter defaults to its
		 * current value in transfer negotiations.
		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			 * If the user provided a sync rate but no offset,
			 * use the current offset.
			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
				spi->sync_offset = tconf->offset;

			 * If the user provided an offset but no sync rate,
			 * use the current sync rate.
			if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
				spi->sync_period = tconf->period;

			adv_period_offset_to_sdtr(adv, &spi->sync_period,
						  cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 cts->ccb_h.target_id, spi->sync_period,
					 spi->sync_offset, update_type);

		ccb->ccb_h.status = CAM_REQ_CMP;
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;

		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;

		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

		spi->sync_period = tconf->period;
		spi->sync_offset = tconf->offset;
		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
	case XPT_CALC_GEOMETRY:
		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;
		cam_calc_geometry(&ccb->ccg, extended);

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		adv_stop_execution(adv);
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
		adv_start_execution(adv);

		ccb->ccb_h.status = CAM_REQ_CMP;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		ccb->ccb_h.status = CAM_REQ_INVALID;

	case XPT_PATH_INQ:		/* Path routing inquiry */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		ccb->ccb_h.status = CAM_REQ_INVALID;
 * Currently, the output of bus_dmamap_load suits our needs just
 * fine, but should it change, we'd need to do something here.
#define	adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)
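
/*
 * adv_execute_ccb() runs as the bus_dmamap_load_ccb() callback.  With the
 * data buffer mapped (or with no data phase at all), it fills in an
 * adv_scsi_q request, hands it to the firmware via
 * adv_execute_scsi_queue(), links the CCB onto the pending list and
 * starts its timeout.  On a temporary resource shortage the CCB is
 * requeued instead.
 */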
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccb_h;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;
	struct adv_scsi_q scsiq;
	struct adv_sg_head sghead;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	mtx_assert(&adv->lock, MA_OWNED);

	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;

	 * Build up the request
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
		scsiq.q2.tag_code = 0;

	if (nsegments != 0) {
		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
			scsiq.q1.cntl |= QC_SG_HEAD;
			    = sghead.entry_to_copy
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
			scsiq.sg_head = NULL;
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;

	 * Last time we need to check if this SCB needs to
	if (ccb_h->status != CAM_REQ_INPROG) {
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);

	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout */
	callout_reset(&cinfo->timer, ccb_h->timeout * hz /1000, adv_timeout,
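
/*
 * Carve the next unused entry out of the preallocated ccb_infos array,
 * initialize its timeout callout and create its busdma map.
 */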
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
	struct adv_ccb_info *cinfo;

	cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
	cinfo->state = ACCB_FREE;
	callout_init_mtx(&cinfo->timer, &adv->lock, 0);
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
		device_printf(adv->dev, "Unable to allocate CCB info "
		    "dmamap - error %d\n", error);
	adv->ccb_infos_allocated++;

adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
	callout_drain(&cinfo->timer);
	bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
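
/*
 * adv_timeout() fires when a queued CCB does not complete in time.  It
 * halts firmware execution, marks this CCB as the recovery CCB, stops
 * the timeouts of all other pending CCBs, and first tries an abort; if
 * the abort itself times out, the handler escalates to a bus reset.
 */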
adv_timeout(void *arg)
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo, *cinfo2;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	mtx_assert(&adv->lock, MA_OWNED);

	xpt_print_path(ccb->ccb_h.path);
	printf("Timed out\n");

	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts. This
		 * means that the driver attempts to clear only one error
		 * condition at a time. In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel. Timeouts will
		 * be reinstated when the recovery process ends.
		adv_set_state(adv, ADV_IN_TIMEOUT);

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			cinfo2 = ccb_h->ccb_cinfo_ptr;
			callout_stop(&cinfo2->timer);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tack */
		xpt_print_path(ccb->ccb_h.path);
		printf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		callout_reset(&cinfo->timer, 2 * hz, adv_timeout, ccb);
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		printf("Resetting bus\n");
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv, /*initiate_reset*/TRUE);

	adv_start_execution(adv);
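
/*
 * adv_alloc() prepares the softc for a newly probed controller: it
 * initializes the pending and free CCB lists, records the register
 * offset and sets up the per-controller mutex.
 */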
adv_alloc(device_t dev, struct resource *res, long offset)
	struct adv_softc *adv = device_get_softc(dev);

	 * Allocate a storage area for us
	LIST_INIT(&adv->pending_ccbs);
	SLIST_INIT(&adv->free_ccb_infos);
	adv->reg_off = offset;
	mtx_init(&adv->lock, "adv", NULL, MTX_DEF);

adv_free(struct adv_softc *adv)
	switch (adv->init_level) {
		struct adv_ccb_info *cinfo;

		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
		bus_dma_tag_destroy(adv->sense_dmat);
		bus_dma_tag_destroy(adv->buffer_dmat);
		bus_dma_tag_destroy(adv->parent_dmat);
	if (adv->ccb_infos != NULL)
		free(adv->ccb_infos, M_DEVBUF);
	mtx_destroy(&adv->lock);
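
/*
 * adv_init() brings the adapter to a known state: it halts the
 * microcode, reads and sanity-checks the EEPROM configuration (falling
 * back to conservative defaults on a checksum mismatch), sizes the
 * queue resources, loads the LRAM microcode and programs the
 * per-target transfer settings.
 */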
adv_init(struct adv_softc *adv)
	struct adv_eeprom_config eeprom_config;
	u_int16_t config_lsw;
	u_int16_t config_msw;

	mtx_lock(&adv->lock);

	 * Stop script execution.
	adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
	adv_stop_execution(adv);
	if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) {
		mtx_unlock(&adv->lock);
		device_printf(adv->dev,
		    "Unable to halt adapter. Initialization failed\n");
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		mtx_unlock(&adv->lock);
		device_printf(adv->dev,
		    "Unable to set program counter. Initialization failed\n");

	config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
	config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
		config_msw &= ~ADV_CFG_MSW_CLR_MASK;
		 * XXX The Linux code flags this as an error,
		 * but what should we report to the user???
		 * It seems that clearing the config register
		 * makes this error recoverable.
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);

	/* Suck in the configuration from the EEProm */
	checksum = adv_get_eeprom_config(adv, &eeprom_config);

	if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
		 * XXX The Linux code sets a warning level for this
		 * condition, yet nothing of meaning is printed to
		 * the user. What does this mean???
		if (adv->chip_version == 3) {
			if (eeprom_config.cfg_lsw != config_lsw)
				eeprom_config.cfg_lsw = config_lsw;
			if (eeprom_config.cfg_msw != config_msw) {
				eeprom_config.cfg_msw = config_msw;

	if (checksum == eeprom_config.chksum) {
		/* Range/Sanity checking */
		if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
		if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
		if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
			eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
		if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
			eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
		adv->max_openings = eeprom_config.max_total_qng;
		adv->user_disc_enable = eeprom_config.disc_enable;
		adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
		adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
		adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
		EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
		adv->control = eeprom_config.cntl;
		for (i = 0; i <= ADV_MAX_TID; i++) {
			if ((eeprom_config.init_sdtr & (0x1 << i)) == 0)

			sync_data = eeprom_config.sdtr_data[i];
			adv_sdtr_to_period_offset(adv,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
		config_lsw = eeprom_config.cfg_lsw;
		eeprom_config.cfg_msw = config_msw;
		device_printf(adv->dev, "Warning EEPROM Checksum mismatch. "
		    "Using default device parameters\n");

		/* Set reasonable defaults since we can't read the EEPROM */
		adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
		adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
		adv->disc_enable = TARGET_BIT_VECTOR_SET;
		adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
		adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->control = 0xFFFF;
		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			/* Default to no Ultra to support the 3030 */
			adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA;
		sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
		for (i = 0; i <= ADV_MAX_TID; i++) {
			adv_sdtr_to_period_offset(adv, sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
		config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON;

	config_msw &= ~ADV_CFG_MSW_CLR_MASK;
	config_lsw |= ADV_CFG_LSW_HOST_INT_ON;
	if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)
	 && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0)

	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (adv->tinfo[i].user.period < max_sync)
			adv->tinfo[i].user.period = max_sync;
	if (adv_test_external_lram(adv) == 0) {
		if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
			eeprom_config.cfg_msw |= 0x0800;
			config_msw |= 0x0800;
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
		adv->max_openings = eeprom_config.max_total_qng;

	ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw);

	 * Don't write the eeprom data back for now.
	 * I'd rather not mess up the user's card. We also don't
	 * fully sanitize the eeprom settings above for the write-back
	 * to be 100% correct.
	if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
		device_printf(adv->dev,
		    "WARNING! Failure writing to EEPROM.\n");

	adv_set_chip_scsiid(adv, adv->scsi_id);
	if (adv_init_lram_and_mcode(adv)) {
		mtx_unlock(&adv->lock);

	adv->disc_enable = adv->user_disc_enable;

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		 * Start off in async mode.
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 i, /*period*/0, /*offset*/0,
		 * Enable the use of tagged commands on all targets.
		 * This allows the kernel driver to make up its own mind
		 * as it sees fit to tag queue instead of having the
		 * firmware try and second guess the tag_code settings.
		adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
	adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	device_printf(adv->dev,
	    "AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
	    (adv->type & ADV_ULTRA) && (max_sync == 0)
	    ? "Ultra SCSI" : "SCSI",
	    adv->scsi_id, adv->max_openings);
	mtx_unlock(&adv->lock);

	struct adv_softc *adv;

	mtx_lock(&adv->lock);
	adv_intr_locked(adv);
	mtx_unlock(&adv->lock);
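
/*
 * adv_intr_locked() does the real interrupt work with the adv lock held:
 * it screens the chip status for a pending interrupt or a latched bus
 * reset, saves and restores the LRAM address and host flag around the
 * interrupt acknowledge, and lets adv_isr_chip_halted() service
 * halted-chip conditions.
 */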
adv_intr_locked(struct adv_softc *adv)
	u_int16_t saved_ram_addr;
	u_int8_t saved_ctrl_reg;

	mtx_assert(&adv->lock, MA_OWNED);
	chipstat = ADV_INW(adv, ADV_CHIP_STATUS);

	if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0)

	ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
	saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
				       ADV_CC_SINGLE_STEP | ADV_CC_DIAG |

	if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) {
		device_printf(adv->dev, "Detected Bus Reset\n");
		adv_reset_bus(adv, /*initiate_reset*/FALSE);

	if ((chipstat & ADV_CSW_INT_PENDING) != 0) {
		saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
		host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
				 host_flag | ADV_HOST_FLAG_IN_ISR);

		adv_ack_interrupt(adv);

		if ((chipstat & ADV_CSW_HALTED) != 0
		 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) {
			adv_isr_chip_halted(adv);
			saved_ctrl_reg &= ~ADV_CC_HALT;

		ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
		if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
			panic("adv_intr: Unable to set LRAM addr");
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);

	ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
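
/*
 * adv_run_doneq() drains the firmware's done queue in LRAM.  Each
 * completed request (and any S/G continuation queues it consumed) is
 * marked free, the active count is adjusted, and the completion status
 * is translated for CAM by adv_done().
 */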
adv_run_doneq(struct adv_softc *adv)
	struct adv_q_done_info scsiq;

	doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
	done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
	while (done_qno != ADV_QLINK_END) {
		struct adv_ccb_info *cinfo;

		done_qaddr = ADV_QNO_TO_QADDR(done_qno);

		/* Pull status from this request */
		sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
						   adv->max_dma_count);

		/* Mark it as free */
		adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
				 scsiq.q_status & ~(QS_READY|QS_ABORTED));

		/* Process request based on retrieved info */
		if ((scsiq.cntl & QC_SG_HEAD) != 0) {
			 * S/G based request. Free all of the queue
			 * structures that contained S/G information.
			for (i = 0; i < sg_queue_cnt; i++) {
				done_qno = adv_read_lram_8(adv, done_qaddr
				if (done_qno == ADV_QLINK_END) {
					panic("adv_qdone: Corrupted SG "
					      "list encountered");
				done_qaddr = ADV_QNO_TO_QADDR(done_qno);

				/* Mark SG queue as free */
				adv_write_lram_8(adv, done_qaddr
				    + ADV_SCSIQ_B_STATUS, QS_FREE);

		if (adv->cur_active < (sg_queue_cnt + 1))
			panic("adv_qdone: Attempting to free more "
			      "queues than are active");
		adv->cur_active -= sg_queue_cnt + 1;

		aborted = (scsiq.q_status & QS_ABORTED) != 0;

		if ((scsiq.q_status != QS_DONE)
		 && (scsiq.q_status & QS_ABORTED) == 0)
			panic("adv_qdone: completed scsiq with unknown status");

		scsiq.remain_bytes += scsiq.extra_bytes;

		if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
		    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
			if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
				scsiq.d3.done_stat = QD_NO_ERROR;
				scsiq.d3.host_stat = QHSTA_NO_ERROR;

		cinfo = &adv->ccb_infos[scsiq.d2.ccb_index];
		ccb->csio.resid = scsiq.remain_bytes;
			 scsiq.d3.done_stat, scsiq.d3.host_stat,
			 scsiq.d3.scsi_stat, scsiq.q_no);

		doneq_head = done_qno;
		done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
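
/*
 * adv_done() finishes a CCB: it removes it from the pending list, stops
 * its timeout, syncs and unloads the data map, translates the adapter's
 * done/host/SCSI status triple into a CAM status, and freezes the
 * device queue on errors before the CCB is returned to CAM.
 */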
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
	 u_int host_stat, u_int scsi_status, u_int q_no)
	struct adv_ccb_info *cinfo;

	mtx_assert(&adv->lock, MA_OWNED);
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
	callout_stop(&cinfo->timer);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);

	switch (done_stat) {
		if (host_stat == QHSTA_NO_ERROR) {
			ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done without error, "
		       "but host status non-zero(%x)\n", host_stat);
		switch (host_stat) {
		case QHSTA_M_TARGET_STATUS_BUSY:
		case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
			 * Assume that if we were a tagged transaction
			 * the target reported queue full. Otherwise,
			 * report busy. The firmware really should just
			 * pass the original status back up to us even
			 * if it thinks the target was in error for
			 * returning this status as no other transactions
			 * from this initiator are in effect, but this
			 * ignores multi-initiator setups and there is
			 * evidence that the firmware gets its per-device
			 * transaction counts screwed up occasionally.
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
			 && host_stat != QHSTA_M_TARGET_STATUS_BUSY)
				scsi_status = SCSI_STATUS_QUEUE_FULL;
				scsi_status = SCSI_STATUS_BUSY;
			adv_abort_ccb(adv, ccb->ccb_h.target_id,
				      ccb->ccb_h.target_lun,
				      /*ccb*/NULL, CAM_REQUEUE_REQ,
				      /*queued_only*/TRUE);
		case QHSTA_M_NO_AUTO_REQ_SENSE:
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
		case QHSTA_M_BAD_BUS_PHASE_SEQ:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
		case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adv_reset_bus(adv, /*initiate_reset*/TRUE);
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
		case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
		case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
		case QHSTA_D_HOST_ABORT_FAILED:
		case QHSTA_D_EXE_SCSI_Q_FAILED:
		case QHSTA_D_ASPI_NO_BUF_POOL:
		case QHSTA_M_BAD_TAG_CODE:
		case QHSTA_D_LRAM_CMP_ERROR:
		case QHSTA_M_MICRO_CODE_ERROR_HALT:
			panic("%s: Unhandled Host status error %x",
			      device_get_nameunit(adv->dev), host_stat);
	case QD_ABORTED_BY_HOST:
		/* Don't clobber any, more explicit, error codes we've set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done with unknown status %x:%x\n",
		       done_stat, host_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

	adv_clear_state(adv, ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
	 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	adv_free_ccb_info(adv, cinfo);
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	ccb->ccb_h.ccb_cinfo_ptr = NULL;
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
adv_poll(struct cam_sim *sim)
	adv_intr_locked(cam_sim_softc(sim));

 * Attach all the sub-devices we can find
	struct adv_softc *adv;
	struct ccb_setasync csa;
	struct cam_devq *devq;

	 * Allocate an array of ccb mapping structures. We put the
	 * index of the ccb_info structure into the queue representing
	 * a transaction and use it for mapping the queue to the
	 * upper level SCSI transaction it represents.
	adv->ccb_infos = malloc(sizeof(*adv->ccb_infos) * adv->max_openings,
				M_DEVBUF, M_NOWAIT);

	if (adv->ccb_infos == NULL)

	 * Create our DMA tags. These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 *
	 * The ASC boards use chains of "queues" (the transactional
	 * resources on the board) to represent long S/G lists.
	 * The first queue represents the command and holds a
	 * single address and data pair. The queues that follow
	 * can each hold ADV_SG_LIST_PER_Q entries. Given the
	 * total number of queues, we can express the largest
	 * transaction we can map. We reserve a few queues for
	 * error recovery. Take those into account as well.
	 *
	 * There is a way to take an interrupt to download the
	 * next batch of S/G entries if there are more than 255
	 * of them (the counter in the queue structure is a u_int8_t).
	 * We don't use this feature, so limit the S/G list size
	max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent */ adv->parent_dmat,
			/* lowaddr */ BUS_SPACE_MAXADDR,
			/* highaddr */ BUS_SPACE_MAXADDR,
			/* filterarg */ NULL,
			/* maxsize */ ADV_MAXPHYS,
			/* nsegments */ max_sg,
			/* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
			/* flags */ BUS_DMA_ALLOCNOW,
			/* lockfunc */ busdma_lock_mutex,
			/* lockarg */ &adv->lock,
			&adv->buffer_dmat) != 0) {

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(
			/* parent */ adv->parent_dmat,
			/* lowaddr */ BUS_SPACE_MAXADDR,
			/* highaddr */ BUS_SPACE_MAXADDR,
			/* filterarg */ NULL,
			/* maxsize */ sizeof(struct scsi_sense_data) *
			/* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
			/* lockfunc */ busdma_lock_mutex,
			/* lockarg */ &adv->lock,
			&adv->sense_dmat) != 0) {

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			sizeof(struct scsi_sense_data)*adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	if (adv_start_chip(adv) != 1) {
		device_printf(adv->dev,
		    "Unable to start on board processor. Aborting.\n");

	 * Create the device queue for our SIM.
	devq = cam_simq_alloc(adv->max_openings);

	 * Construct our SIM entry.
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv,
	    device_get_unit(adv->dev), &adv->lock, 1, adv->max_openings, devq);
	if (adv->sim == NULL)

	 * XXX Twin Channel EISA Cards???
	mtx_lock(&adv->lock);
	if (xpt_bus_register(adv->sim, adv->dev, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		mtx_unlock(&adv->lock);

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
		xpt_bus_deregister(cam_sim_path(adv->sim));
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		mtx_unlock(&adv->lock);

	xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback = advasync;
	csa.callback_arg = adv;
	xpt_action((union ccb *)&csa);
	mtx_unlock(&adv->lock);

MODULE_DEPEND(adv, cam, 1, 1, 1);