2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
4 * Copyright (c) 1994-2002 Justin T. Gibbs.
5 * Copyright (c) 2001-2002 Adaptec Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * Alternatively, this software may be distributed under the terms of the
18 * GNU Public License ("GPL").
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
24 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#22 $
37 #include <dev/aic7xxx/aic79xx_osm.h>
38 #include <dev/aic7xxx/aic79xx_inline.h>
45 #ifndef AHD_TMODE_ENABLE
46 #define AHD_TMODE_ENABLE 0
49 #define ccb_scb_ptr spriv_ptr0
/*
 * Forward declarations for the CAM SIM entry points (action/poll) and
 * local helper routines used by this OSM shim.
 * NOTE(review): the embedded original line numbering is non-contiguous,
 * so some declarations (e.g. ahd_abort_ccb) appear truncated in this
 * extraction — confirm against the pristine source.
 */
52 static void ahd_dump_targcmd(struct target_cmd *cmd);
54 static int ahd_modevent(module_t mod, int type, void *data);
55 static void ahd_action(struct cam_sim *sim, union ccb *ccb);
56 static void ahd_set_tran_settings(struct ahd_softc *ahd,
57 int our_id, char channel,
58 struct ccb_trans_settings *cts);
59 static void ahd_get_tran_settings(struct ahd_softc *ahd,
60 int our_id, char channel,
61 struct ccb_trans_settings *cts);
62 static void ahd_async(void *callback_arg, uint32_t code,
63 struct cam_path *path, void *arg);
64 static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
65 int nsegments, int error);
66 static void ahd_poll(struct cam_sim *sim);
67 static void ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
68 struct ccb_scsiio *csio, struct scb *scb);
69 static void ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim,
71 static int ahd_create_path(struct ahd_softc *ahd,
72 char channel, u_int target, u_int lun,
73 struct cam_path **path);
76 static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
/*
 * Build a CAM path for the given channel/target/lun on this controller.
 * Channel 'B' selects the secondary SIM (sim_b), otherwise the primary
 * SIM's path id is used; the result is returned through *path via
 * xpt_create_path().
 * NOTE(review): the opening brace, locals and the channel test are
 * missing from this extraction — verify against the pristine source.
 */
80 ahd_create_path(struct ahd_softc *ahd, char channel, u_int target,
81 u_int lun, struct cam_path **path)
86 path_id = cam_sim_path(ahd->platform_data->sim_b);
88 path_id = cam_sim_path(ahd->platform_data->sim);
90 return (xpt_create_path(path, /*periph*/NULL,
91 path_id, target, lun));
/*
 * Hook the controller's interrupt handler (ahd_platform_intr) to the
 * IRQ resource allocated by the bus front-end.  On failure the error
 * from bus_setup_intr() is reported via device_printf().
 * NOTE(review): the function's opening brace, local declarations and
 * return are missing from this extraction.
 */
95 ahd_map_int(struct ahd_softc *ahd)
99 /* Hook up our interrupt handler */
100 error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq,
101 INTR_TYPE_CAM, ahd_platform_intr, ahd,
102 &ahd->platform_data->ih);
104 device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n",
110 * Attach all the sub-devices we can find
113 ahd_attach(struct ahd_softc *ahd)
116 struct ccb_setasync csa;
117 struct cam_devq *devq;
119 struct cam_path *path;
126 ahd_controller_info(ahd, ahd_info);
127 printf("%s\n", ahd_info);
131 * Create the device queue for our SIM(s).
133 devq = cam_simq_alloc(AHD_MAX_QUEUE);
138 * Construct our SIM entry
140 sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd,
141 device_get_unit(ahd->dev_softc),
142 1, /*XXX*/256, devq);
/* Register the SIM with the transport layer; on failure tear down. */
148 if (xpt_bus_register(sim, /*bus_id*/0) != CAM_SUCCESS) {
149 cam_sim_free(sim, /*free_devq*/TRUE);
154 if (xpt_create_path(&path, /*periph*/NULL,
155 cam_sim_path(sim), CAM_TARGET_WILDCARD,
156 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
157 xpt_bus_deregister(cam_sim_path(sim));
158 cam_sim_free(sim, /*free_devq*/TRUE);
/*
 * Ask CAM to call ahd_async() whenever a device on this bus is lost,
 * so transfer negotiation state can be reset for the next device.
 */
163 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
164 csa.ccb_h.func_code = XPT_SASYNC_CB;
165 csa.event_enable = AC_LOST_DEVICE;
166 csa.callback = ahd_async;
167 csa.callback_arg = sim;
168 xpt_action((union ccb *)&csa);
172 ahd->platform_data->sim = sim;
173 ahd->platform_data->path = path;
175 /* We have to wait until after any system dumps... */
176 ahd->platform_data->eh =
177 EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown,
178 ahd, SHUTDOWN_PRI_DEFAULT);
179 ahd_intr_enable(ahd, TRUE);
188 * Catch an interrupt from the adapter
191 ahd_platform_intr(void *arg)
193 struct ahd_softc *ahd;
/* arg is the softc registered with bus_setup_intr() in ahd_map_int(). */
195 ahd = (struct ahd_softc *)arg;
200 * We have an scb which has been processed by the
201 * adaptor, now we look to see how the operation
205 ahd_done(struct ahd_softc *ahd, struct scb *scb)
209 CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
210 ("ahd_done - scb %d\n", SCB_GET_TAG(scb)));
/* Drop the SCB from the pending list and cancel its timeout. */
213 LIST_REMOVE(scb, pending_links);
215 untimeout(ahd_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);
/* Sync/unload the data DMA map if the CCB actually moved data. */
217 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
220 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
221 op = BUS_DMASYNC_POSTREAD;
223 op = BUS_DMASYNC_POSTWRITE;
224 bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
225 bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
228 #ifdef AHD_TARGET_MODE
229 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
230 struct cam_path *ccb_path;
233 * If we have finally disconnected, clean up our
234 * pending device state.
235 * XXX - There may be error states that cause where
236 * we will remain connected.
238 ccb_path = ccb->ccb_h.path;
239 if (ahd->pending_device != NULL
240 && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) {
242 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
243 ahd->pending_device = NULL;
245 xpt_print_path(ccb->ccb_h.path);
246 printf("Still disconnected\n");
251 if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG)
252 ccb->ccb_h.status |= CAM_REQ_CMP;
253 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
254 ahd_free_scb(ahd, scb);
261 * If the recovery SCB completes, we have to be
262 * out of our timeout.
264 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
265 struct scb *list_scb;
268 * We were able to complete the command successfully,
269 * so reinstate the timeouts for all other pending
272 LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
276 ccb = list_scb->io_ctx;
277 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
280 time = ccb->ccb_h.timeout;
283 ccb->ccb_h.timeout_ch =
284 timeout(ahd_timeout, list_scb, time);
/*
 * A BDR/abort issued by recovery is reported to the peripheral
 * driver as a command timeout.
 */
287 if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
288 || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
289 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
290 ahd_print_path(ahd, scb);
291 printf("no longer in timeout, status = %x\n",
295 /* Don't clobber any existing error state */
296 if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
297 ccb->ccb_h.status |= CAM_REQ_CMP;
298 } else if ((scb->flags & SCB_SENSE) != 0) {
300 * We performed autosense retrieval.
302 * Zero any sense not transferred by the
303 * device. The SCSI spec mandates that any
304 * untransfered data should be assumed to be
305 * zero. Complete the 'bounce' of sense information
306 * through buffers accessible via bus-space by
307 * copying it into the clients csio.
309 memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
310 memcpy(&ccb->csio.sense_data,
311 ahd_get_sense_buf(ahd, scb),
312 /* XXX What size do we want to use??? */
313 sizeof(ccb->csio.sense_data)
314 - ccb->csio.sense_resid);
315 scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
316 } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
317 struct scsi_status_iu_header *siu;
/*
 * Packetized (IU) autosense: the sense bytes live after the
 * status IU header; copy only the valid portion, clamped to
 * the client's sense buffer size.
 */
322 * Copy only the sense data into the provided buffer.
324 siu = (struct scsi_status_iu_header *)scb->sense_data;
325 sense_len = MIN(scsi_4btoul(siu->sense_length),
326 sizeof(ccb->csio.sense_data));
327 memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
328 memcpy(&ccb->csio.sense_data,
329 ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
331 printf("Copied %d bytes of sense data offset %d:", sense_len,
332 SIU_SENSE_OFFSET(siu));
333 for (i = 0; i < sense_len; i++)
334 printf(" 0x%x", ((uint8_t *)&ccb->csio.sense_data)[i]);
336 scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
338 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
339 ahd_free_scb(ahd, scb);
/*
 * Main CAM SIM action entry point: dispatch an incoming CCB by
 * function code (I/O, resets, transfer-settings, geometry, path
 * inquiry, and — when AHD_TARGET_MODE is built — target-mode CCBs).
 * NOTE(review): many interior lines (breaks, else-branches, closing
 * braces) are missing from this extraction; verify statement pairing
 * against the pristine source before modifying.
 */
344 ahd_action(struct cam_sim *sim, union ccb *ccb)
346 struct ahd_softc *ahd;
347 #ifdef AHD_TARGET_MODE
348 struct ahd_tmode_lstate *lstate;
354 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n"));
356 ahd = (struct ahd_softc *)cam_sim_softc(sim);
358 target_id = ccb->ccb_h.target_id;
359 our_id = SIM_SCSI_ID(ahd, sim);
361 switch (ccb->ccb_h.func_code) {
362 /* Common cases first */
363 #ifdef AHD_TARGET_MODE
364 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
365 case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/
367 struct ahd_tmode_tstate *tstate;
370 status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
373 if (status != CAM_REQ_CMP) {
374 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
375 /* Response from the black hole device */
377 lstate = ahd->black_hole;
379 ccb->ccb_h.status = status;
384 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
/* Queue the ATIO until a command arrives from an initiator. */
387 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
389 ccb->ccb_h.status = CAM_REQ_INPROG;
390 if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0)
391 ahd_run_tqinfifo(ahd, /*paused*/FALSE);
397 * The target_id represents the target we attempt to
398 * select. In target mode, this is the initiator of
399 * the original command.
402 target_id = ccb->csio.init_id;
406 case XPT_SCSI_IO: /* Execute the requested I/O operation */
407 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
410 struct hardware_scb *hscb;
411 struct ahd_initiator_tinfo *tinfo;
412 struct ahd_tmode_tstate *tstate;
/* Reject initiator-role requests if we are target-only. */
415 if ((ahd->flags & AHD_INITIATORROLE) == 0
416 && (ccb->ccb_h.func_code == XPT_SCSI_IO
417 || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
418 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
/*
 * Pick an SCB collision index: untagged, packetized (IU), and
 * target-continue I/O never collide; tagged I/O collides per
 * target/lun.
 */
427 tinfo = ahd_fetch_transinfo(ahd, 'A', our_id,
429 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
430 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0
431 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
432 col_idx = AHD_NEVER_COL_IDX;
434 col_idx = AHD_BUILD_COL_IDX(target_id,
435 ccb->ccb_h.target_lun);
437 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
/* Out of SCBs: freeze the queue and ask CAM to requeue. */
439 xpt_freeze_simq(sim, /*count*/1);
440 ahd->flags |= AHD_RESOURCE_SHORTAGE;
442 ccb->ccb_h.status = CAM_REQUEUE_REQ;
450 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
451 ("start scb(%p)\n", scb));
454 * So we can find the SCB when an abort is requested
456 ccb->ccb_h.ccb_scb_ptr = scb;
459 * Put all the arguments for the xfer in the scb
462 hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
463 hscb->lun = ccb->ccb_h.target_lun;
464 if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
466 scb->flags |= SCB_DEVICE_RESET;
467 hscb->control |= MK_MESSAGE;
468 ahd_execute_scb(scb, NULL, 0, 0);
470 #ifdef AHD_TARGET_MODE
471 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
472 struct target_data *tdata;
474 tdata = &hscb->shared_data.tdata;
475 if (ahd->pending_device == lstate)
476 scb->flags |= SCB_TARGET_IMMEDIATE;
477 hscb->control |= TARGET_SCB;
478 tdata->target_phases = 0;
479 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
480 tdata->target_phases |= SPHASE_PENDING;
482 ccb->csio.scsi_status;
484 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
485 tdata->target_phases |= NO_DISCONNECT;
487 tdata->initiator_tag =
488 ahd_htole16(ccb->csio.tag_id);
491 if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
492 hscb->control |= ccb->csio.tag_action;
494 ahd_setup_data(ahd, sim, &ccb->csio, scb);
498 #ifdef AHD_TARGET_MODE
500 case XPT_IMMED_NOTIFY:
502 struct ahd_tmode_tstate *tstate;
503 struct ahd_tmode_lstate *lstate;
506 status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
509 if (status != CAM_REQ_CMP) {
510 ccb->ccb_h.status = status;
514 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
516 ccb->ccb_h.status = CAM_REQ_INPROG;
517 ahd_send_lstate_events(ahd, lstate);
520 case XPT_EN_LUN: /* Enable LUN as a target */
521 ahd_handle_en_lun(ahd, sim, ccb);
525 case XPT_ABORT: /* Abort the specified CCB */
527 ahd_abort_ccb(ahd, sim, ccb);
530 case XPT_SET_TRAN_SETTINGS:
533 ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
534 SIM_CHANNEL(ahd, sim), &ccb->cts);
539 case XPT_GET_TRAN_SETTINGS:
540 /* Get default/user set transfer settings for the target */
543 ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
544 SIM_CHANNEL(ahd, sim), &ccb->cts);
549 case XPT_CALC_GEOMETRY:
551 struct ccb_calc_geometry *ccg;
553 uint32_t secs_per_cylinder;
/*
 * Classic BIOS-style geometry: >1GB with extended translation
 * uses 63 sectors/track, otherwise 32.
 */
557 size_mb = ccg->volume_size
558 / ((1024L * 1024L) / ccg->block_size);
559 extended = ahd->flags & AHD_EXTENDED_TRANS_A;
561 if (size_mb > 1024 && extended) {
563 ccg->secs_per_track = 63;
566 ccg->secs_per_track = 32;
568 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
569 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
570 ccb->ccb_h.status = CAM_REQ_CMP;
574 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
579 found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
580 /*initiate reset*/TRUE);
583 xpt_print_path(SIM_PATH(ahd, sim));
584 printf("SCSI bus reset delivered. "
585 "%d SCBs aborted.\n", found);
587 ccb->ccb_h.status = CAM_REQ_CMP;
591 case XPT_TERM_IO: /* Terminate the I/O process */
/* Not supported by this driver. */
593 ccb->ccb_h.status = CAM_REQ_INVALID;
596 case XPT_PATH_INQ: /* Path routing inquiry */
598 struct ccb_pathinq *cpi = &ccb->cpi;
600 cpi->version_num = 1; /* XXX??? */
601 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
602 if ((ahd->features & AHD_WIDE) != 0)
603 cpi->hba_inquiry |= PI_WIDE_16;
604 if ((ahd->features & AHD_TARGETMODE) != 0) {
605 cpi->target_sprt = PIT_PROCESSOR
609 cpi->target_sprt = 0;
612 cpi->hba_eng_cnt = 0;
613 cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7;
614 cpi->max_lun = AHD_NUM_LUNS - 1;
615 cpi->initiator_id = ahd->our_id;
616 if ((ahd->flags & AHD_RESET_BUS_A) == 0) {
617 cpi->hba_misc |= PIM_NOBUSRESET;
619 cpi->bus_id = cam_sim_bus(sim);
620 cpi->base_transfer_speed = 3300;
621 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
622 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
623 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
624 cpi->unit_number = cam_sim_unit(sim);
625 #ifdef AHD_NEW_TRAN_SETTINGS
626 cpi->protocol = PROTO_SCSI;
627 cpi->protocol_version = SCSI_REV_2;
628 cpi->transport = XPORT_SPI;
629 cpi->transport_version = 2;
630 cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
631 cpi->transport_version = 4;
632 cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST;
634 cpi->ccb_h.status = CAM_REQ_CMP;
639 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
/*
 * Handle XPT_SET_TRAN_SETTINGS: apply disconnection, tagged queuing,
 * bus width, sync period/offset and PPR options from the CCB to either
 * the per-target goal (current settings) or user settings, then program
 * the hardware via ahd_set_width()/ahd_set_syncrate().
 * Two variants are compiled depending on AHD_NEW_TRAN_SETTINGS (new
 * scsi/spi split CTS layout vs. the legacy flat ccb_trans_settings).
 * NOTE(review): several lines (locals, else keywords, closing braces)
 * are missing from this extraction — verify against pristine source.
 */
647 ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
648 struct ccb_trans_settings *cts)
650 #ifdef AHD_NEW_TRAN_SETTINGS
651 struct ahd_devinfo devinfo;
652 struct ccb_trans_settings_scsi *scsi;
653 struct ccb_trans_settings_spi *spi;
654 struct ahd_initiator_tinfo *tinfo;
655 struct ahd_tmode_tstate *tstate;
656 uint16_t *discenable;
660 scsi = &cts->proto_specific.scsi;
661 spi = &cts->xport_specific.spi;
662 ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
663 cts->ccb_h.target_id,
664 cts->ccb_h.target_lun,
665 SIM_CHANNEL(ahd, sim),
667 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
669 devinfo.target, &tstate);
/* Select goal (current) vs. user settings as the update target. */
671 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
672 update_type |= AHD_TRANS_GOAL;
673 discenable = &tstate->discenable;
674 tagenable = &tstate->tagenable;
675 tinfo->curr.protocol_version = cts->protocol_version;
676 tinfo->curr.transport_version = cts->transport_version;
677 tinfo->goal.protocol_version = cts->protocol_version;
678 tinfo->goal.transport_version = cts->transport_version;
679 } else if (cts->type == CTS_TYPE_USER_SETTINGS) {
680 update_type |= AHD_TRANS_USER;
681 discenable = &ahd->user_discenable;
682 tagenable = &ahd->user_tagenable;
683 tinfo->user.protocol_version = cts->protocol_version;
684 tinfo->user.transport_version = cts->transport_version;
686 cts->ccb_h.status = CAM_REQ_INVALID;
690 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
691 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
692 *discenable |= devinfo.target_mask;
694 *discenable &= ~devinfo.target_mask;
697 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
698 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
699 *tagenable |= devinfo.target_mask;
701 *tagenable &= ~devinfo.target_mask;
704 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
705 ahd_validate_width(ahd, /*tinfo limit*/NULL,
706 &spi->bus_width, ROLE_UNKNOWN);
707 ahd_set_width(ahd, &devinfo, spi->bus_width,
708 update_type, /*paused*/FALSE);
/* Fields the caller did not mark valid fall back to stored values. */
711 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
712 if (update_type == AHD_TRANS_USER)
713 spi->ppr_options = tinfo->user.ppr_options;
715 spi->ppr_options = tinfo->goal.ppr_options;
718 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
719 if (update_type == AHD_TRANS_USER)
720 spi->sync_offset = tinfo->user.offset;
722 spi->sync_offset = tinfo->goal.offset;
725 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
726 if (update_type == AHD_TRANS_USER)
727 spi->sync_period = tinfo->user.period;
729 spi->sync_period = tinfo->goal.period;
732 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
733 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
736 maxsync = AHD_SYNCRATE_MAX;
/* DT requires a wide bus; IU requires disconnection. */
738 if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
739 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
741 if ((*discenable & devinfo.target_mask) == 0)
742 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
744 ahd_find_syncrate(ahd, &spi->sync_period,
745 &spi->ppr_options, maxsync);
746 ahd_validate_offset(ahd, /*tinfo limit*/NULL,
747 spi->sync_period, &spi->sync_offset,
748 spi->bus_width, ROLE_UNKNOWN);
750 /* We use a period of 0 to represent async */
751 if (spi->sync_offset == 0) {
752 spi->sync_period = 0;
753 spi->ppr_options = 0;
756 ahd_set_syncrate(ahd, &devinfo, spi->sync_period,
757 spi->sync_offset, spi->ppr_options,
758 update_type, /*paused*/FALSE);
760 cts->ccb_h.status = CAM_REQ_CMP;
762 struct ahd_devinfo devinfo;
763 struct ahd_initiator_tinfo *tinfo;
764 struct ahd_tmode_tstate *tstate;
765 uint16_t *discenable;
769 ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
770 cts->ccb_h.target_id,
771 cts->ccb_h.target_lun,
772 SIM_CHANNEL(ahd, sim),
774 tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
776 devinfo.target, &tstate);
778 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
779 update_type |= AHD_TRANS_GOAL;
780 discenable = &tstate->discenable;
781 tagenable = &tstate->tagenable;
782 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
783 update_type |= AHD_TRANS_USER;
784 discenable = &ahd->user_discenable;
785 tagenable = &ahd->user_tagenable;
787 cts->ccb_h.status = CAM_REQ_INVALID;
791 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
792 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
793 *discenable |= devinfo.target_mask;
795 *discenable &= ~devinfo.target_mask;
798 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
799 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
800 *tagenable |= devinfo.target_mask;
802 *tagenable &= ~devinfo.target_mask;
805 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
806 ahd_validate_width(ahd, /*tinfo limit*/NULL,
807 &cts->bus_width, ROLE_UNKNOWN);
808 ahd_set_width(ahd, &devinfo, cts->bus_width,
809 update_type, /*paused*/FALSE);
812 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
813 if (update_type == AHD_TRANS_USER)
814 cts->sync_offset = tinfo->user.offset;
816 cts->sync_offset = tinfo->goal.offset;
819 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
820 if (update_type == AHD_TRANS_USER)
821 cts->sync_period = tinfo->user.period;
823 cts->sync_period = tinfo->goal.period;
826 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
827 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)
828 || ((cts->valid & CCB_TRANS_TQ_VALID) != 0)
829 || ((cts->valid & CCB_TRANS_DISC_VALID) != 0)) {
833 maxsync = AHD_SYNCRATE_MAX;
835 if (cts->sync_period <= AHD_SYNCRATE_DT
836 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT) {
837 ppr_options = tinfo->user.ppr_options
838 | MSG_EXT_PPR_DT_REQ;
/* IU needs both tagged queuing and disconnection enabled. */
841 if ((*tagenable & devinfo.target_mask) == 0
842 || (*discenable & devinfo.target_mask) == 0)
843 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
845 ahd_find_syncrate(ahd, &cts->sync_period,
846 &ppr_options, maxsync);
847 ahd_validate_offset(ahd, /*tinfo limit*/NULL,
848 cts->sync_period, &cts->sync_offset,
849 MSG_EXT_WDTR_BUS_8_BIT,
852 /* We use a period of 0 to represent async */
853 if (cts->sync_offset == 0) {
854 cts->sync_period = 0;
859 && tinfo->user.transport_version >= 3) {
860 tinfo->goal.transport_version =
861 tinfo->user.transport_version;
862 tinfo->curr.transport_version =
863 tinfo->user.transport_version;
866 ahd_set_syncrate(ahd, &devinfo, cts->sync_period,
867 cts->sync_offset, ppr_options,
868 update_type, /*paused*/FALSE);
870 cts->ccb_h.status = CAM_REQ_CMP;
/*
 * Handle XPT_GET_TRAN_SETTINGS: report either the current or the user
 * transfer settings (disconnect, tagged queuing, width, sync rate and
 * offset, PPR options) for the addressed target into the caller's CCB.
 * Compiled in two variants depending on AHD_NEW_TRAN_SETTINGS.
 * NOTE(review): some lines (closing braces, else keywords) are missing
 * from this extraction.
 */
875 ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
876 struct ccb_trans_settings *cts)
878 #ifdef AHD_NEW_TRAN_SETTINGS
879 struct ahd_devinfo devinfo;
880 struct ccb_trans_settings_scsi *scsi;
881 struct ccb_trans_settings_spi *spi;
882 struct ahd_initiator_tinfo *targ_info;
883 struct ahd_tmode_tstate *tstate;
884 struct ahd_transinfo *tinfo;
886 scsi = &cts->proto_specific.scsi;
887 spi = &cts->xport_specific.spi;
888 ahd_compile_devinfo(&devinfo, our_id,
889 cts->ccb_h.target_id,
890 cts->ccb_h.target_lun,
891 channel, ROLE_UNKNOWN);
892 targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
894 devinfo.target, &tstate);
896 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
897 tinfo = &targ_info->curr;
899 tinfo = &targ_info->user;
/* Report disconnect/tag-queuing state from the chosen settings bank. */
901 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
902 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
903 if (cts->type == CTS_TYPE_USER_SETTINGS) {
904 if ((ahd->user_discenable & devinfo.target_mask) != 0)
905 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
907 if ((ahd->user_tagenable & devinfo.target_mask) != 0)
908 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
910 if ((tstate->discenable & devinfo.target_mask) != 0)
911 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
913 if ((tstate->tagenable & devinfo.target_mask) != 0)
914 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
916 cts->protocol_version = tinfo->protocol_version;
917 cts->transport_version = tinfo->transport_version;
919 spi->sync_period = tinfo->period;
920 spi->sync_offset = tinfo->offset;
921 spi->bus_width = tinfo->width;
922 spi->ppr_options = tinfo->ppr_options;
924 cts->protocol = PROTO_SCSI;
925 cts->transport = XPORT_SPI;
926 spi->valid = CTS_SPI_VALID_SYNC_RATE
927 | CTS_SPI_VALID_SYNC_OFFSET
928 | CTS_SPI_VALID_BUS_WIDTH
929 | CTS_SPI_VALID_PPR_OPTIONS;
/* Disc/TQ are per-lun attributes; skip them for wildcard luns. */
931 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
932 scsi->valid = CTS_SCSI_VALID_TQ;
933 spi->valid |= CTS_SPI_VALID_DISC;
938 cts->ccb_h.status = CAM_REQ_CMP;
940 struct ahd_devinfo devinfo;
941 struct ahd_initiator_tinfo *targ_info;
942 struct ahd_tmode_tstate *tstate;
943 struct ahd_transinfo *tinfo;
945 ahd_compile_devinfo(&devinfo, our_id,
946 cts->ccb_h.target_id,
947 cts->ccb_h.target_lun,
948 channel, ROLE_UNKNOWN);
949 targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
951 devinfo.target, &tstate);
953 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
954 tinfo = &targ_info->curr;
956 tinfo = &targ_info->user;
958 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
959 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
960 if ((ahd->user_discenable & devinfo.target_mask) != 0)
961 cts->flags |= CCB_TRANS_DISC_ENB;
963 if ((ahd->user_tagenable & devinfo.target_mask) != 0)
964 cts->flags |= CCB_TRANS_TAG_ENB;
966 if ((tstate->discenable & devinfo.target_mask) != 0)
967 cts->flags |= CCB_TRANS_DISC_ENB;
969 if ((tstate->tagenable & devinfo.target_mask) != 0)
970 cts->flags |= CCB_TRANS_TAG_ENB;
972 cts->sync_period = tinfo->period;
973 cts->sync_offset = tinfo->offset;
974 cts->bus_width = tinfo->width;
976 cts->valid = CCB_TRANS_SYNC_RATE_VALID
977 | CCB_TRANS_SYNC_OFFSET_VALID
978 | CCB_TRANS_BUS_WIDTH_VALID;
980 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
981 cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;
983 cts->ccb_h.status = CAM_REQ_CMP;
/*
 * CAM async event callback (registered in ahd_attach for
 * AC_LOST_DEVICE).  On device loss, revert the device's negotiated
 * transfer settings to async/narrow so the next device at that address
 * starts from a safe baseline.
 * NOTE(review): the event-code switch and locking entry are missing
 * from this extraction; only the lost-device path is visible.
 */
988 ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
990 struct ahd_softc *ahd;
993 sim = (struct cam_sim *)callback_arg;
994 ahd = (struct ahd_softc *)cam_sim_softc(sim);
998 struct ahd_devinfo devinfo;
1001 ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
1002 xpt_path_target_id(path),
1003 xpt_path_lun_id(path),
1004 SIM_CHANNEL(ahd, sim),
1008 * Revert to async/narrow transfers
1009 * for the next device.
1012 ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
1013 AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE);
1014 ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
1015 /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR,
1017 ahd_unlock(ahd, &s);
/*
 * busdma callback: finish building an SCB once its data buffer mapping
 * is known, set negotiation/disconnect flags, arm the CCB timeout, and
 * queue the SCB to the controller.  Called with nsegments == 0 for
 * transfers with no data phase (e.g. XPT_RESET_DEV).
 * NOTE(review): several lines (error-code checks near the top, loop
 * internals, closing braces) are missing from this extraction.
 */
1026 ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
1031 struct ahd_softc *ahd;
1032 struct ahd_initiator_tinfo *tinfo;
1033 struct ahd_tmode_tstate *tstate;
1037 scb = (struct scb *)arg;
1039 ahd = scb->ahd_softc;
/* Mapping failure: fail the CCB, release resources, and complete it. */
1043 ahd_set_transaction_status(scb, CAM_REQ_TOO_BIG);
1045 ahd_set_transaction_status(scb, CAM_REQ_CMP_ERR);
1047 bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
1049 ahd_free_scb(ahd, scb);
1050 ahd_unlock(ahd, &s);
1055 if (nsegments != 0) {
1057 bus_dmasync_op_t op;
1060 /* Copy the segments into our SG list */
1061 for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
1063 sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
1069 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1070 op = BUS_DMASYNC_PREREAD;
1072 op = BUS_DMASYNC_PREWRITE;
1074 bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
1076 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1077 struct target_data *tdata;
1079 tdata = &scb->hscb->shared_data.tdata;
1080 tdata->target_phases |= DPHASE_PENDING;
1081 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1082 tdata->data_phase = P_DATAOUT;
1084 tdata->data_phase = P_DATAIN;
1091 * Last time we need to check if this SCB needs to
1094 if (ahd_get_transaction_status(scb) != CAM_REQ_INPROG) {
1096 bus_dmamap_unload(ahd->buffer_dmat,
1098 ahd_free_scb(ahd, scb);
1099 ahd_unlock(ahd, &s);
1104 tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
1105 SCSIID_OUR_ID(scb->hscb->scsiid),
1106 SCSIID_TARGET(ahd, scb->hscb->scsiid),
1109 mask = SCB_GET_TARGET_MASK(ahd, scb);
/* Allow disconnection unless the CCB explicitly forbids it. */
1111 if ((tstate->discenable & mask) != 0
1112 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1113 scb->hscb->control |= DISCENB;
1115 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
1116 scb->flags |= SCB_PACKETIZED;
/* Request (re)negotiation if CAM asked for it or goals changed. */
1118 if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
1119 && (tinfo->goal.width != 0
1120 || tinfo->goal.period != 0
1121 || tinfo->goal.ppr_options != 0)) {
1122 scb->flags |= SCB_NEGOTIATE;
1123 scb->hscb->control |= MK_MESSAGE;
1124 } else if ((tstate->auto_negotiate & mask) != 0) {
1125 scb->flags |= SCB_AUTO_NEGOTIATE;
1126 scb->hscb->control |= MK_MESSAGE;
1129 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1131 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1133 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1136 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1137 ccb->ccb_h.timeout = 5 * 1000;
1139 time = ccb->ccb_h.timeout;
1142 ccb->ccb_h.timeout_ch =
1143 timeout(ahd_timeout, (caddr_t)scb, time);
1146 if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
1147 /* Define a mapping from our tag to the SCB. */
1148 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
1150 ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1151 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
1154 ahd_queue_scb(ahd, scb);
1157 ahd_unlock(ahd, &s);
/* CAM SIM poll entry point: service the controller without interrupts. */
1161 ahd_poll(struct cam_sim *sim)
1163 ahd_intr(cam_sim_softc(sim));
/*
 * Populate the hardware SCB with the CDB from the CCB and kick off
 * data-buffer mapping.  Depending on the CCB flags the data is a
 * virtual buffer (mapped via bus_dmamap_load, which calls
 * ahd_execute_scb when complete), a single physical buffer, or a
 * caller-supplied physical S/G list.  Oversized CDBs complete the CCB
 * with an error.
 * NOTE(review): some lines (else keywords, closing braces, error
 * status constants) are missing from this extraction.
 */
1167 ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
1168 struct ccb_scsiio *csio, struct scb *scb)
1170 struct hardware_scb *hscb;
1171 struct ccb_hdr *ccb_h;
1174 ccb_h = &csio->ccb_h;
1177 csio->sense_resid = 0;
1178 if (ccb_h->func_code == XPT_SCSI_IO) {
1179 hscb->cdb_len = csio->cdb_len;
1180 if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
/* Virtual CDB pointers larger than MAX_CDB_LEN cannot be copied. */
1182 if (hscb->cdb_len > MAX_CDB_LEN
1183 && (ccb_h->flags & CAM_CDB_PHYS) == 0) {
1186 ahd_set_transaction_status(scb,
1189 ahd_free_scb(ahd, scb);
1190 ahd_unlock(ahd, &s);
1191 xpt_done((union ccb *)csio);
1194 if ((ccb_h->flags & CAM_CDB_PHYS) != 0) {
1195 hscb->shared_data.idata.cdbptr =
1196 ahd_htole64((uintptr_t)csio->cdb_io.cdb_ptr);
1198 memcpy(hscb->shared_data.idata.cdb,
1199 csio->cdb_io.cdb_ptr,
1203 if (hscb->cdb_len > MAX_CDB_LEN) {
1206 ahd_set_transaction_status(scb,
1209 ahd_free_scb(ahd, scb);
1210 ahd_unlock(ahd, &s);
1211 xpt_done((union ccb *)csio);
1214 memcpy(hscb->shared_data.idata.cdb,
1215 csio->cdb_io.cdb_bytes, hscb->cdb_len);
1219 /* Only use S/G if there is a transfer */
1220 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1221 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1222 /* We've been given a pointer to a single buffer */
1223 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1228 error = bus_dmamap_load(ahd->buffer_dmat,
1234 if (error == EINPROGRESS) {
1236 * So as to maintain ordering,
1237 * freeze the controller queue
1238 * until our mapping is
1241 xpt_freeze_simq(sim,
1243 scb->io_ctx->ccb_h.status |=
1248 struct bus_dma_segment seg;
1250 /* Pointer to physical buffer */
1251 if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE)
1252 panic("ahd_setup_data - Transfer size "
1253 "larger than can device max");
1255 seg.ds_addr = (bus_addr_t)csio->data_ptr;
1256 seg.ds_len = csio->dxfer_len;
1257 ahd_execute_scb(scb, &seg, 1, 0);
1260 struct bus_dma_segment *segs;
1262 if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
1263 panic("ahd_setup_data - Physical segment "
1264 "pointers unsupported");
1266 if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
1267 panic("ahd_setup_data - Virtual segment "
1268 "addresses unsupported");
1270 /* Just use the segments provided */
1271 segs = (struct bus_dma_segment *)csio->data_ptr;
1272 ahd_execute_scb(scb, segs, csio->sglist_cnt, 0);
/* No data phase: execute immediately with an empty S/G list. */
1275 ahd_execute_scb(scb, NULL, 0, 0);
/*
 * Mark an SCB as the recovery SCB: freeze the SIM queue so no new CCBs
 * arrive while error recovery is in progress, and cancel the timeouts
 * of all other pending SCBs (they are rescheduled in ahd_done() when
 * the recovery SCB completes successfully).
 */
1281 ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb) {
1283 if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
1284 struct scb *list_scb;
1286 scb->flags |= SCB_RECOVERY_SCB;
1289 * Take all queued, but not sent SCBs out of the equation.
1290 * Also ensure that no new CCBs are queued to us while we
1291 * try to fix this problem.
1293 if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
1294 xpt_freeze_simq(SCB_GET_SIM(ahd, scb), /*count*/1);
1295 scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
1299 * Go through all of our pending SCBs and remove
1300 * any scheduled timeouts for them. We will reschedule
1301 * them after we've successfully fixed this problem.
1303 LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
1306 ccb = list_scb->io_ctx;
1307 untimeout(ahd_timeout, list_scb, ccb->ccb_h.timeout_ch);
/*
 * Timeout recovery handler, scheduled via timeout(9) when an SCB is queued.
 * 'arg' is the timed-out SCB.  Pauses the controller and then escalates:
 *  - if the SCB already completed, just report possible interrupt loss;
 *  - if another SCB owns the bus, defer to that SCB's (longer) timeout;
 *  - otherwise queue a Bus Device Reset to the target, or, if BDR/abort
 *    was already attempted, reset the entire channel.
 * NOTE(review): declarations for s, target, channel, lun, last_phase, i,
 * found, saved_scbptr, disconnected and several braces are not visible in
 * this view (extraction gaps) — comments describe only the visible lines.
 */
1314 ahd_timeout(void *arg)
1317 struct ahd_softc *ahd;
1318 ahd_mode_state saved_modes;
1330 scb = (struct scb *)arg;
1331 ahd = (struct ahd_softc *)scb->ahd_softc;
/* Quiesce the chip: pause the sequencer and drain any queued work. */
1335 ahd_pause_and_flushwork(ahd);
1337 saved_modes = ahd_save_modes(ahd);
/*
 * NOTE(review): the ACK set/clear below, bracketed by printfs, looks like
 * leftover debug instrumentation rather than recovery logic — confirm.
 */
1339 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1340 ahd_outb(ahd, SCSISIGO, ACKO);
1341 printf("set ACK\n");
1342 ahd_outb(ahd, SCSISIGO, 0);
1343 printf("clearing Ack\n");
1344 ahd_restore_modes(ahd, saved_modes);
1346 if ((scb->flags & SCB_ACTIVE) == 0) {
1347 /* Previous timeout took care of me already */
1348 printf("%s: Timedout SCB already complete. "
1349 "Interrupts may not be functioning.\n", ahd_name(ahd));
1351 ahd_unlock(ahd, &s);
1355 target = SCB_GET_TARGET(ahd, scb);
1356 channel = SCB_GET_CHANNEL(ahd, scb);
1357 lun = SCB_GET_LUN(scb);
1359 ahd_print_path(ahd, scb);
1360 printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
1361 ahd_dump_card_state(ahd);
/* NOTE(review): 'sim' is not a visible local/parameter here — verify
 * it is in scope in the full source. */
1362 ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
1363 /*initiate reset*/TRUE);
1364 ahd_unlock(ahd, &s);
1367 last_phase = ahd_inb(ahd, LASTPHASE);
/* Dump the S/G list of the timed-out command for diagnosis. */
1368 if (scb->sg_count > 0) {
1369 for (i = 0; i < scb->sg_count; i++) {
1370 printf("sg[%d] - Addr 0x%x : Length %d\n",
1372 ((struct ahd_dma_seg *)scb->sg_list)[i].addr,
1373 ((struct ahd_dma_seg *)scb->sg_list)[i].len
/* A BDR or abort was already tried on this SCB — escalate to bus reset. */
1377 if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
1379 * Been down this road before.
1380 * Do a full bus reset.
1383 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
1384 found = ahd_reset_channel(ahd, channel, /*Initiate Reset*/TRUE);
1385 printf("%s: Issued Channel %c Bus Reset. "
1386 "%d SCBs aborted\n", ahd_name(ahd), channel, found);
1389 * If we are a target, transition to bus free and report
1392 * The target/initiator that is holding up the bus may not
1393 * be the same as the one that triggered this timeout
1394 * (different commands have different timeout lengths).
1395 * If the bus is idle and we are acting as the initiator
1396 * for this request, queue a BDR message to the timed out
1397 * target. Otherwise, if the timed out transaction is
1399 * Initiator transaction:
1400 * Stuff the message buffer with a BDR message and assert
1401 * ATN in the hopes that the target will let go of the bus
1402 * and go to the mesgout phase. If this fails, we'll
1403 * get another timeout 2 seconds later which will attempt
1406 * Target transaction:
1407 * Transition to BUS FREE and report the error.
1408 * It's good to be the target!
1410 u_int active_scb_index;
1413 saved_scbptr = ahd_get_scbptr(ahd);
1414 active_scb_index = saved_scbptr;
1416 if (last_phase != P_BUSFREE
1417 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
1418 && (active_scb_index < ahd->scb_data.numscbs)) {
1419 struct scb *active_scb;
1422 * If the active SCB is not us, assume that
1423 * the active SCB has a longer timeout than
1424 * the timedout SCB, and wait for the active
1427 active_scb = ahd_lookup_scb(ahd, active_scb_index);
1428 if (active_scb != scb) {
1429 struct ccb_hdr *ccbh;
1430 uint64_t newtimeout;
1432 ahd_print_path(ahd, scb);
1433 printf("Other SCB Timeout%s",
1434 (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
1435 ? " again\n" : "\n");
1436 scb->flags |= SCB_OTHERTCL_TIMEOUT;
/* Re-arm our timeout for the longer of the two CCB timeouts. */
1438 MAX(active_scb->io_ctx->ccb_h.timeout,
1439 scb->io_ctx->ccb_h.timeout);
1442 ccbh = &scb->io_ctx->ccb_h;
1443 scb->io_ctx->ccb_h.timeout_ch =
1444 timeout(ahd_timeout, scb, newtimeout);
1446 ahd_unlock(ahd, &s);
/* Target-mode SCB: abort queued transactions and record the error. */
1451 if ((scb->hscb->control & TARGET_SCB) != 0) {
1454 * Send back any queued up transactions
1455 * and properly record the error condition.
1457 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
1458 SCB_GET_CHANNEL(ahd, scb),
1464 /* Will clear us from the bus */
1466 ahd_unlock(ahd, &s);
/* Initiator with the bus held: stuff a BDR message and assert ATN. */
1470 ahd_set_recoveryscb(ahd, active_scb);
1471 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1472 ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
1473 ahd_print_path(ahd, active_scb);
1474 printf("BDR message in message buffer\n");
1475 active_scb->flags |= SCB_DEVICE_RESET;
/* Give the BDR two seconds to take effect before escalating. */
1476 active_scb->io_ctx->ccb_h.timeout_ch =
1477 timeout(ahd_timeout, (caddr_t)active_scb, 2 * hz);
1482 /* XXX Shouldn't panic. Just punt instead? */
1483 if ((scb->hscb->control & TARGET_SCB) != 0)
1484 panic("Timed-out target SCB but bus idle");
1486 if (last_phase != P_BUSFREE
1487 && (ahd_inb(ahd, SSTAT0) & TARGET) != 0) {
1488 /* XXX What happened to the SCB? */
1489 /* Hung target selection. Goto busfree */
1490 printf("%s: Hung target selection\n",
1493 ahd_unlock(ahd, &s);
/* Is the SCB still waiting in the QINFIFO, or already disconnected? */
1497 if (ahd_search_qinfifo(ahd, target, channel, lun,
1498 SCB_GET_TAG(scb), ROLE_INITIATOR,
1499 /*status*/0, SEARCH_COUNT) > 0) {
1500 disconnected = FALSE;
1502 disconnected = TRUE;
1507 ahd_set_recoveryscb(ahd, scb);
1509 * Actually re-queue this SCB in an attempt
1510 * to select the device before it reconnects.
1511 * In either case (selection or reselection),
1512 * we will now issue a target reset to the
1515 * Set the MK_MESSAGE control bit indicating
1516 * that we desire to send a message. We
1517 * also set the disconnected flag since
1518 * in the paging case there is no guarantee
1519 * that our SCB control byte matches the
1520 * version on the card. We don't want the
1521 * sequencer to abort the command thinking
1522 * an unsolicited reselection occurred.
1524 scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
1525 scb->flags |= SCB_DEVICE_RESET;
1528 * The sequencer will never re-reference the
1529 * in-core SCB. To make sure we are notified
1530 * during reselection, set the MK_MESSAGE flag
1531 * in the card's copy of the SCB.
1533 ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
1534 ahd_outb(ahd, SCB_CONTROL,
1535 ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
1538 * Clear out any entries in the QINFIFO first
1539 * so we are the next SCB for this target
1542 ahd_search_qinfifo(ahd,
1543 SCB_GET_TARGET(ahd, scb),
1544 channel, SCB_GET_LUN(scb),
1549 ahd_print_path(ahd, scb);
1550 printf("Queuing a BDR SCB\n");
1551 ahd_qinfifo_requeue_tail(ahd, scb);
1552 ahd_set_scbptr(ahd, saved_scbptr);
1553 scb->io_ctx->ccb_h.timeout_ch =
1554 timeout(ahd_timeout, (caddr_t)scb, 2 * hz);
1557 /* Go "immediately" to the bus reset */
1558 /* This shouldn't happen */
1559 ahd_set_recoveryscb(ahd, scb);
1560 ahd_print_path(ahd, scb);
1561 printf("SCB %d: Immediate reset. "
1562 "Flags = 0x%x\n", SCB_GET_TAG(scb),
1568 ahd_unlock(ahd, &s);
/*
 * Handle an XPT_ABORT request: try to abort the CCB referenced by
 * ccb->cab.abort_ccb.  Target-mode accept-TIO / immediate-notify CCBs that
 * are still queued on our lists are unlinked and completed with
 * CAM_REQ_ABORTED; anything else is reported as CAM_UA_ABORT (not yet
 * implemented) or CAM_REQ_INVALID.
 * NOTE(review): braces, 'status' declaration, SLIST unlink body and several
 * break statements are not visible in this view — confirm with full source.
 */
1573 ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
1575 union ccb *abort_ccb;
1577 abort_ccb = ccb->cab.abort_ccb;
1578 switch (abort_ccb->ccb_h.func_code) {
1579 #ifdef AHD_TARGET_MODE
1580 case XPT_ACCEPT_TARGET_IO:
1581 case XPT_IMMED_NOTIFY:
1582 case XPT_CONT_TARGET_IO:
1584 struct ahd_tmode_tstate *tstate;
1585 struct ahd_tmode_lstate *lstate;
1586 struct ccb_hdr_slist *list;
/* Resolve the lun state the CCB was queued against. */
1589 status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate,
1592 if (status != CAM_REQ_CMP) {
1593 ccb->ccb_h.status = status;
/* Pick the SLIST the CCB would be sitting on. */
1597 if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
1598 list = &lstate->accept_tios;
1599 else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
1600 list = &lstate->immed_notifies;
1605 struct ccb_hdr *curelm;
/* Singly-linked list removal: special-case the head, else walk. */
1608 curelm = SLIST_FIRST(list);
1610 if (curelm == &abort_ccb->ccb_h) {
1612 SLIST_REMOVE_HEAD(list, sim_links.sle);
1614 while(curelm != NULL) {
1615 struct ccb_hdr *nextelm;
1618 SLIST_NEXT(curelm, sim_links.sle);
1620 if (nextelm == &abort_ccb->ccb_h) {
/* Found it: complete the aborted CCB and report success. */
1633 abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
1634 xpt_done(abort_ccb);
1635 ccb->ccb_h.status = CAM_REQ_CMP;
1637 xpt_print_path(abort_ccb->ccb_h.path);
1638 printf("Not found\n");
1639 ccb->ccb_h.status = CAM_PATH_INVALID;
1647 /* XXX Fully implement the hard ones */
1648 ccb->ccb_h.status = CAM_UA_ABORT;
1651 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * Deliver an asynchronous event (transfer negotiation change, BDR sent,
 * bus reset, ...) to CAM via xpt_async() on a freshly built path.
 * For AC_TRANSFER_NEG the current transfer settings are fetched and the
 * tagged-queuing bits are rewritten to reflect *opt_arg (an ahd_queue_alg).
 *
 * BUG FIX: the AHD_NEW_TRAN_SETTINGS enable path read
 *     scsi->flags |= ~CTS_SCSI_FLAGS_TAG_ENB;
 * which sets every flag EXCEPT TAG_ENB.  The non-new-settings branch
 * below ("cts.flags |= CCB_TRANS_TAG_ENB;") and the disable path above
 * ("scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;") show the intent; the '~'
 * must not be there.
 */
1658 ahd_send_async(struct ahd_softc *ahd, char channel, u_int target,
1659 u_int lun, ac_code code, void *opt_arg)
1661 struct ccb_trans_settings cts;
1662 struct cam_path *path;
1667 error = ahd_create_path(ahd, channel, target, lun, &path);
/* Without a valid path there is nobody to notify. */
1669 if (error != CAM_REQ_CMP)
1673 case AC_TRANSFER_NEG:
1675 #ifdef AHD_NEW_TRAN_SETTINGS
1676 struct ccb_trans_settings_scsi *scsi;
1678 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1679 scsi = &cts.proto_specific.scsi;
1681 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1683 cts.ccb_h.path = path;
1684 cts.ccb_h.target_id = target;
1685 cts.ccb_h.target_lun = lun;
1686 ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts);
/* Default to tagged queuing disabled/invalid ... */
1688 #ifdef AHD_NEW_TRAN_SETTINGS
1689 scsi->valid &= ~CTS_SCSI_VALID_TQ;
1690 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1692 cts.valid &= ~CCB_TRANS_TQ_VALID;
1693 cts.flags &= ~CCB_TRANS_TAG_ENB;
1695 if (opt_arg == NULL)
/* ... then re-enable it if the caller requested tagged queuing. */
1697 if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED)
1698 #ifdef AHD_NEW_TRAN_SETTINGS
1699 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1700 scsi->valid |= CTS_SCSI_VALID_TQ;
1702 cts.flags |= CCB_TRANS_TAG_ENB;
1703 cts.valid |= CCB_TRANS_TQ_VALID;
1711 panic("ahd_send_async: Unexpected async event");
1713 xpt_async(code, path, arg);
1714 xpt_free_path(path);
/*
 * Platform hook invoked when tagged queuing is enabled/disabled for a
 * device.  Body not visible in this view; presumably a no-op on FreeBSD
 * (CAM manages tag state) — TODO confirm against the full source.
 */
1718 ahd_platform_set_tags(struct ahd_softc *ahd,
1719 struct ahd_devinfo *devinfo, int enable)
/*
 * Allocate the FreeBSD-specific platform data hanging off the softc.
 * Returns non-zero (visible here only as the NULL check) when the
 * allocation fails; malloc flags line is not visible in this view.
 */
1724 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
1726 ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
1728 if (ahd->platform_data == NULL)
/*
 * Tear down everything ahd_platform_alloc()/attach set up: release both
 * register BARs and the IRQ resource, deregister and free both SIMs and
 * their paths, remove the shutdown event handler, and finally free the
 * platform data itself.  Each resource is released only if present, so
 * this is safe to call on a partially initialized softc.
 */
1734 ahd_platform_free(struct ahd_softc *ahd)
1736 struct ahd_platform_data *pdata;
1738 pdata = ahd->platform_data;
1739 if (pdata != NULL) {
1740 if (pdata->regs[0] != NULL)
1741 bus_release_resource(ahd->dev_softc,
1742 pdata->regs_res_type[0],
1743 pdata->regs_res_id[0],
1746 if (pdata->regs[1] != NULL)
1747 bus_release_resource(ahd->dev_softc,
1748 pdata->regs_res_type[1],
1749 pdata->regs_res_id[1],
1752 if (pdata->irq != NULL)
1753 bus_release_resource(ahd->dev_softc,
1754 pdata->irq_res_type,
/* Second (B) channel SIM, if one was registered. */
1757 if (pdata->sim_b != NULL) {
1758 xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
1759 xpt_free_path(pdata->path_b);
1760 xpt_bus_deregister(cam_sim_path(pdata->sim_b));
1761 cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
1763 if (pdata->sim != NULL) {
1764 xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
1765 xpt_free_path(pdata->path);
1766 xpt_bus_deregister(cam_sim_path(pdata->sim));
1767 cam_sim_free(pdata->sim, /*free_devq*/TRUE);
1769 if (pdata->eh != NULL)
1770 EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
1771 free(ahd->platform_data, M_DEVBUF);
/*
 * Softc comparison callback used when ordering controllers; FreeBSD does
 * not sort softcs, so this always reports "equal" (return not visible).
 */
1776 ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
1778 /* We don't sort softcs under FreeBSD so report equal always */
/*
 * Newbus detach method: locate our softc on the global ahd list (bailing
 * out if it was already detached), disable controller interrupts, and tear
 * down the interrupt handler.  Locking calls (ahd_list_unlock/ahd_unlock)
 * pair with lock acquisitions not visible in this view.
 */
1783 ahd_detach(device_t dev)
1785 struct ahd_softc *ahd;
1790 device_printf(dev, "detaching device\n");
1791 ahd = device_get_softc(dev);
/* Verify the softc is still on the driver's global list. */
1792 ahd = ahd_find_softc(ahd);
1794 device_printf(dev, "aic7xxx already detached\n");
1795 ahd_list_unlock(&l);
1799 ahd_intr_enable(ahd, FALSE);
1800 bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih);
1801 ahd_unlock(ahd, &s);
1803 ahd_list_unlock(&l);
/*
 * Debug helper: hex-dump the raw bytes of a received target_cmd, from its
 * initiator_channel field up to the same field of the next array element
 * (i.e. the remainder of the structure).
 */
1809 ahd_dump_targcmd(struct target_cmd *cmd)
1815 byte = &cmd->initiator_channel;
1816 /* Debugging info for received commands */
1817 last_byte = &cmd[1].initiator_channel;
1820 while (byte < last_byte) {
1823 printf("%#x", *byte++);
/*
 * Module event handler for the kld; currently does nothing with the
 * event type (body not visible beyond the XXX note below).
 */
1836 ahd_modevent(module_t mod, int type, void *data)
1838 /* XXX Deal with busy status on unload. */
/* Module descriptor tying the "ahd" kld to ahd_modevent (initializer
 * fields not visible in this view). */
1842 static moduledata_t ahd_mod = {
1848 /********************************** DDB Hooks *********************************/
/* Kernel-debugger state: the softc selected with ahd_set_unit, plus
 * pause bookkeeping used by ahd_pause/ahd_unpause commands. */
1850 static struct ahd_softc *ahd_ddb_softc;
1851 static int ahd_ddb_paused;
1852 static int ahd_ddb_paused_on_entry;
/*
 * DDB command "ahd_set_unit <unit>": select which controller subsequent
 * ahd_* debugger commands operate on, by matching the unit number against
 * the global softc tailq.
 */
1853 DB_COMMAND(ahd_set_unit, ahd_ddb_set_unit)
1855 struct ahd_softc *list_ahd;
1857 ahd_ddb_softc = NULL;
1858 TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
1859 if (list_ahd->unit == addr)
1860 ahd_ddb_softc = list_ahd;
1862 if (ahd_ddb_softc == NULL)
1863 db_error("No matching softc found!\n");
/*
 * DDB command "ahd_pause": pause the selected controller's sequencer,
 * remembering whether it was already paused on entry so ahd_unpause can
 * restore the original state.
 */
1866 DB_COMMAND(ahd_pause, ahd_ddb_pause)
1868 if (ahd_ddb_softc == NULL) {
1869 db_error("Must set unit with ahd_set_unit first!\n");
1872 if (ahd_ddb_paused == 0) {
1874 if (ahd_is_paused(ahd_ddb_softc)) {
1875 ahd_ddb_paused_on_entry++;
1878 ahd_pause(ahd_ddb_softc);
/*
 * DDB command "ahd_unpause": undo ahd_pause.  If the chip was already
 * paused when ahd_pause ran, a second unpause is needed to actually
 * release it (see the paused_on_entry bookkeeping).
 */
1882 DB_COMMAND(ahd_unpause, ahd_ddb_unpause)
1884 if (ahd_ddb_softc == NULL) {
1885 db_error("Must set unit with ahd_set_unit first!\n");
1888 if (ahd_ddb_paused != 0) {
1890 if (ahd_ddb_paused_on_entry)
1892 ahd_unpause(ahd_ddb_softc);
1893 } else if (ahd_ddb_paused_on_entry != 0) {
1894 /* Two unpauses to clear a paused on entry. */
1895 ahd_ddb_paused_on_entry = 0;
1896 ahd_unpause(ahd_ddb_softc);
/*
 * DDB command "ahd_in[/bwl] <addr> [count]": read controller registers on
 * the selected softc, printing the current MODE_PTR with each value.
 * Size modifier parsing ('b'/'w'/'l' from 'modif') selects byte, word or
 * long reads; the switch dispatching on it is not fully visible here.
 */
1900 DB_COMMAND(ahd_in, ahd_ddb_in)
1905 if (ahd_ddb_softc == NULL) {
1906 db_error("Must set unit with ahd_set_unit first!\n");
1913 while ((c = *modif++) != '\0') {
1929 while (--count >= 0) {
1930 db_printf("%04x (M)%x: \t", addr,
1931 ahd_inb(ahd_ddb_softc, MODE_PTR));
1934 db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr));
1937 db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr));
1940 db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr));
/*
 * DDB command "ahd_out[/bwl] <addr> <value>...": write one or more values
 * to controller registers on the selected softc, echoing old and new
 * values along with the current MODE_PTR.  Reads each value with
 * db_expression() until the command line is exhausted.
 */
1946 DB_SET(ahd_out, ahd_ddb_out, db_cmd_set, CS_MORE, NULL)
1948 db_expr_t old_value;
1949 db_expr_t new_value;
1952 if (ahd_ddb_softc == NULL) {
1953 db_error("Must set unit with ahd_set_unit first!\n");
1969 db_error("Unknown size\n");
1973 while (db_expression(&new_value)) {
/* Byte / word / long cases of the size switch (labels not visible). */
1977 old_value = ahd_inb(ahd_ddb_softc, addr);
1978 ahd_outb(ahd_ddb_softc, addr, new_value);
1981 old_value = ahd_inw(ahd_ddb_softc, addr);
1982 ahd_outw(ahd_ddb_softc, addr, new_value);
1985 old_value = ahd_inl(ahd_ddb_softc, addr);
1986 ahd_outl(ahd_ddb_softc, addr, new_value);
1989 db_printf("%04x (M)%x: \t0x%x\t=\t0x%x",
1990 addr, ahd_inb(ahd_ddb_softc, MODE_PTR),
1991 old_value, new_value);
/* Register the "ahd" module with the kernel: load during driver bring-up,
 * depend on the CAM transport layer, and export module version 1. */
2000 DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
2001 MODULE_DEPEND(ahd, cam, 1, 1, 1);
2002 MODULE_VERSION(ahd, 1);