2 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
32 #include <sys/endian.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
39 #include <sys/fcntl.h>
41 #include <sys/interrupt.h>
45 #include <sys/mutex.h>
46 #include <sys/sysctl.h>
49 #include <pc98/pc98/pc98_machdep.h> /* geometry translation */
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_queue.h>
55 #include <cam/cam_periph.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_xpt.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_xpt_periph.h>
60 #include <cam/cam_xpt_internal.h>
61 #include <cam/cam_debug.h>
63 #include <cam/scsi/scsi_all.h>
64 #include <cam/scsi/scsi_message.h>
65 #include <cam/scsi/scsi_pass.h>
66 #include <cam/ata/ata_all.h>
67 #include <machine/stdarg.h> /* for xpt_print below */
/*
 * Per-device quirk record matched against INQUIRY data.
 * NOTE(review): this listing is sampled — interior fields (quirks,
 * mintags, maxtags, closing brace) are missing from view.
 */
70 struct scsi_quirk_entry {
71 struct scsi_inquiry_pattern inq_pat;
/* Quirk flag bits — presumably stored in the missing 'quirks' field. */
73 #define CAM_QUIRK_NOLUNS 0x01
74 #define CAM_QUIRK_NOSERIAL 0x02
75 #define CAM_QUIRK_HILUNS 0x04
76 #define CAM_QUIRK_NOHILUNS 0x08
/* Accessor: the device's opaque quirk pointer cast back to this type. */
80 #define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk))
82 static periph_init_t probe_periph_init;
/*
 * Peripheral driver registration for the "aprobe" (ATA probe) periph.
 * NOTE(review): initializer braces are missing from this sampled view.
 */
84 static struct periph_driver probe_driver =
86 probe_periph_init, "aprobe",
87 TAILQ_HEAD_INITIALIZER(probe_driver.units)
90 PERIPHDRIVER_DECLARE(aprobe, probe_driver);
/*
 * Human-readable names for probe state-machine actions, indexed by the
 * probe action enum. NOTE(review): most entries are missing from this
 * sampled view; only "PROBE_FULL_INQUIRY" is visible.
 */
108 static char *probe_action_text[] = {
113 "PROBE_FULL_INQUIRY",
/*
 * Transition the probe softc to a new action, logging the old->new
 * transition under CAM_DEBUG_INFO.
 */
124 #define PROBE_SET_ACTION(softc, newaction) \
127 text = probe_action_text; \
128 CAM_DEBUG((softc)->periph->path, CAM_DEBUG_INFO, \
129 ("Probe %s to %s\n", text[(softc)->action], \
130 text[(newaction)])); \
131 (softc)->action = (newaction); \
/* Probe flag: suppress device announcement (enum fragment). */
135 PROBE_NO_ANNOUNCE = 0x04
/*
 * probe_softc fragment: queue of outstanding probe request CCBs and a
 * back-pointer to the owning periph. NOTE(review): remaining fields
 * (action, flags, pm_* state, digest, saved_ccb) are not in view.
 */
139 TAILQ_HEAD(, ccb_hdr) request_ccbs;
149 struct cam_periph *periph;
/*
 * Quirk table: a single wildcard entry supplying default tagged-queuing
 * parameters (mintags 2, maxtags 32) for all devices.
 */
152 static struct scsi_quirk_entry scsi_quirk_table[] =
155 /* Default tagged queuing parameters for all devices */
157 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
158 /*vendor*/"*", /*product*/"*", /*revision*/"*"
160 /*quirks*/0, /*mintags*/2, /*maxtags*/32
/* Element count of the table above. */
164 static const int scsi_quirk_table_size =
165 sizeof(scsi_quirk_table) / sizeof(*scsi_quirk_table);
/* Forward declarations for the ATA probe/scan machinery below. */
167 static cam_status proberegister(struct cam_periph *periph,
169 static void probeschedule(struct cam_periph *probe_periph);
170 static void probestart(struct cam_periph *periph, union ccb *start_ccb);
171 //static void proberequestdefaultnegotiation(struct cam_periph *periph);
172 //static int proberequestbackoff(struct cam_periph *periph,
173 // struct cam_ed *device);
174 static void probedone(struct cam_periph *periph, union ccb *done_ccb);
175 static void probecleanup(struct cam_periph *periph);
176 static void scsi_find_quirk(struct cam_ed *device);
177 static void ata_scan_bus(struct cam_periph *periph, union ccb *ccb);
178 static void ata_scan_lun(struct cam_periph *periph,
179 struct cam_path *path, cam_flags flags,
181 static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
182 static struct cam_ed *
183 ata_alloc_device(struct cam_eb *bus, struct cam_et *target,
185 static void ata_device_transport(struct cam_path *path);
186 static void scsi_set_transfer_settings(struct ccb_trans_settings *cts,
187 struct cam_ed *device,
189 static void scsi_toggle_tags(struct cam_path *path);
190 static void ata_dev_async(u_int32_t async_code,
192 struct cam_et *target,
193 struct cam_ed *device,
195 static void ata_action(union ccb *start_ccb);
/*
 * XPT transport vector for the ATA transport: device allocation,
 * CCB action dispatch, and async event handling.
 */
197 static struct xpt_xport ata_xport = {
198 .alloc_device = ata_alloc_device,
199 .action = ata_action,
200 .async = ata_dev_async,
/*
 * Periph registration callback for the probe driver: validates the
 * periph and request CCB, allocates the probe softc, queues the
 * request, acquires a periph reference, applies the bus-settle delay,
 * and schedules the first probe step.
 * Returns CAM_REQ_CMP_ERR on any validation/allocation failure.
 * NOTE(review): sampled listing — some lines (braces, declarations,
 * the success return) are missing from view.
 */
215 proberegister(struct cam_periph *periph, void *arg)
217 union ccb *request_ccb; /* CCB representing the probe request */
221 request_ccb = (union ccb *)arg;
222 if (periph == NULL) {
223 printf("proberegister: periph was NULL!!\n");
224 return(CAM_REQ_CMP_ERR);
227 if (request_ccb == NULL) {
228 printf("proberegister: no probe CCB, "
229 "can't register device\n");
230 return(CAM_REQ_CMP_ERR);
/* M_NOWAIT: may fail; handled below rather than sleeping. */
233 softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);
236 printf("proberegister: Unable to probe new device. "
237 "Unable to allocate softc\n");
238 return(CAM_REQ_CMP_ERR);
240 TAILQ_INIT(&softc->request_ccbs);
241 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
244 periph->softc = softc;
245 softc->periph = periph;
246 softc->action = PROBE_INVALID;
247 status = cam_periph_acquire(periph);
248 if (status != CAM_REQ_CMP) {
254 * Ensure we've waited at least a bus settle
255 * delay before attempting to probe the device.
256 * For HBAs that don't do bus resets, this won't make a difference.
258 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
260 probeschedule(periph);
/*
 * Decide the first probe action for this device and schedule the
 * periph: unconfigured devices start with PROBE_RESET, SATA port
 * multipliers with PROBE_PM_PID, everything else with PROBE_IDENTIFY.
 * Also latches PROBE_NO_ANNOUNCE when the caller expects inquiry
 * data to change.
 */
265 probeschedule(struct cam_periph *periph)
267 struct ccb_pathinq cpi;
271 softc = (probe_softc *)periph->softc;
272 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
/* Query path/controller capabilities (result unused in visible code). */
274 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
275 cpi.ccb_h.func_code = XPT_PATH_INQ;
276 xpt_action((union ccb *)&cpi);
278 if (periph->path->device->flags & CAM_DEV_UNCONFIGURED)
279 PROBE_SET_ACTION(softc, PROBE_RESET);
280 else if (periph->path->device->protocol == PROTO_SATAPM)
281 PROBE_SET_ACTION(softc, PROBE_PM_PID);
283 PROBE_SET_ACTION(softc, PROBE_IDENTIFY);
285 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
286 softc->flags |= PROBE_NO_ANNOUNCE;
288 softc->flags &= ~PROBE_NO_ANNOUNCE;
290 xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
/*
 * Periph start routine: builds and issues the ATA/SCSI command for the
 * current probe state (softc->action) — reset, identify, set transfer
 * mode, inquiry, and the port-multiplier register read/write sequence —
 * then dispatches it via xpt_action(). Completion is handled in
 * probedone(). NOTE(review): sampled listing — case labels and several
 * cam_fill_ataio() argument lines are missing from view.
 */
294 probestart(struct cam_periph *periph, union ccb *start_ccb)
296 /* Probe the device that our peripheral driver points to */
297 struct ccb_ataio *ataio;
298 struct ccb_scsiio *csio;
299 struct ccb_trans_settings cts;
302 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
304 softc = (probe_softc *)periph->softc;
305 ataio = &start_ccb->ataio;
306 csio = &start_ccb->csio;
308 switch (softc->action) {
/* PROBE_RESET (label not in view): soft-reset the target. */
310 if (start_ccb->ccb_h.target_id == 15) {
311 /* Report SIM that we have no knowledge about PM presence. */
312 bzero(&cts, sizeof(cts));
313 xpt_setup_ccb(&cts.ccb_h, start_ccb->ccb_h.path, 1);
314 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
315 cts.type = CTS_TYPE_CURRENT_SETTINGS;
316 cts.xport_specific.sata.pm_present = 0;
317 cts.xport_specific.sata.valid = CTS_SATA_VALID_PM;
318 xpt_action((union ccb *)&cts);
320 cam_fill_ataio(ataio,
323 /*flags*/CAM_DIR_NONE,
/* Shorter timeout (3s) for the PM control target, 15s otherwise. */
327 (start_ccb->ccb_h.target_id == 15 ? 3 : 15) * 1000);
328 ata_reset_cmd(ataio);
/* PROBE_IDENTIFY (label not in view): issue IDENTIFY (ATA or ATAPI). */
332 struct ata_params *ident_buf =
333 &periph->path->device->ident_data;
335 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
336 /* Prepare check that it is the same device. */
/* MD5 over model/revision/serial; compared in probedone(). */
341 (unsigned char *)ident_buf->model,
342 sizeof(ident_buf->model));
344 (unsigned char *)ident_buf->revision,
345 sizeof(ident_buf->revision));
347 (unsigned char *)ident_buf->serial,
348 sizeof(ident_buf->serial));
349 MD5Final(softc->digest, &context);
351 cam_fill_ataio(ataio,
356 /*data_ptr*/(u_int8_t *)ident_buf,
357 /*dxfer_len*/sizeof(struct ata_params),
359 if (periph->path->device->protocol == PROTO_ATA)
360 ata_28bit_cmd(ataio, ATA_ATA_IDENTIFY, 0, 0, 0);
362 ata_28bit_cmd(ataio, ATA_ATAPI_IDENTIFY, 0, 0, 0);
/* PROBE_SETMODE (label not in view): negotiate best UDMA mode. */
367 struct ata_params *ident_buf =
368 &periph->path->device->ident_data;
370 cam_fill_ataio(ataio,
373 /*flags*/CAM_DIR_NONE,
378 ata_28bit_cmd(ataio, ATA_SETFEATURES, ATA_SF_SETXFER, 0,
379 ata_max_mode(ident_buf, ATA_UDMA6, ATA_UDMA6));
383 case PROBE_FULL_INQUIRY:
386 struct scsi_inquiry_data *inq_buf =
387 &periph->path->device->inq_data;
389 if (softc->action == PROBE_INQUIRY)
390 inquiry_len = SHORT_INQUIRY_LENGTH;
392 inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
394 * Some parallel SCSI devices fail to send an
395 * ignore wide residue message when dealing with
396 * odd length inquiry requests. Round up to be
399 inquiry_len = roundup2(inquiry_len, 2);
409 /*timeout*/60 * 1000);
/* PROBE_PM_PID (label not in view): read PM product ID (reg 0). */
413 cam_fill_ataio(ataio,
416 /*flags*/CAM_DIR_NONE,
421 ata_pm_read_cmd(ataio, 0, 15);
/* PROBE_PM_PRV (label not in view): read PM revision (reg 1). */
424 cam_fill_ataio(ataio,
427 /*flags*/CAM_DIR_NONE,
432 ata_pm_read_cmd(ataio, 1, 15);
/* PROBE_PM_PORTS (label not in view): read PM port count (reg 2). */
435 cam_fill_ataio(ataio,
438 /*flags*/CAM_DIR_NONE,
443 ata_pm_read_cmd(ataio, 2, 15);
/* PROBE_PM_RESET (label not in view): per-port SControl write. */
447 struct ata_params *ident_buf =
448 &periph->path->device->ident_data;
449 cam_fill_ataio(ataio,
452 /*flags*/CAM_DIR_NONE,
/* ident_buf->cylinders is reused here as a per-port presence bitmap. */
457 ata_pm_write_cmd(ataio, 2, softc->pm_step,
458 (ident_buf->cylinders & (1 << softc->pm_step)) ? 0 : 1);
459 printf("PM RESET %d %04x %d\n", softc->pm_step, ident_buf->cylinders,
460 (ident_buf->cylinders & (1 << softc->pm_step)) ? 0 : 1);
463 case PROBE_PM_CONNECT:
464 cam_fill_ataio(ataio,
467 /*flags*/CAM_DIR_NONE,
472 ata_pm_write_cmd(ataio, 2, softc->pm_step, 0);
/* PROBE_PM_CHECK (label not in view): read port status (reg 0). */
475 cam_fill_ataio(ataio,
478 /*flags*/CAM_DIR_NONE,
483 ata_pm_read_cmd(ataio, 0, softc->pm_step);
/* PROBE_PM_CLEAR (label not in view): clear SError on the port. */
486 cam_fill_ataio(ataio,
489 /*flags*/CAM_DIR_NONE,
494 ata_pm_write_cmd(ataio, 1, softc->pm_step, 0xFFFFFFFF);
497 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO,
498 ("probestart: invalid action state\n"));
502 xpt_action(start_ccb);
/*
 * Fetch the user (default) transfer settings for the path and re-apply
 * them as the current settings, effectively resetting negotiation to
 * defaults. No-op (early return, not in view) if the GET fails.
 */
506 proberequestdefaultnegotiation(struct cam_periph *periph)
508 struct ccb_trans_settings cts;
510 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
511 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
512 cts.type = CTS_TYPE_USER_SETTINGS;
513 xpt_action((union ccb *)&cts);
514 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
517 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
518 cts.type = CTS_TYPE_CURRENT_SETTINGS;
519 xpt_action((union ccb *)&cts);
523 * Backoff Negotiation Code- only pertinent for SPI devices.
/*
 * Step the SPI sync period down one notch (toward async) for domain
 * validation, giving up once async is reached (CAM_DEV_DV_HIT_BOTTOM).
 * Reads current settings, validates transport/sync-rate fields, then
 * writes the adjusted settings back. NOTE(review): return statements
 * and the period-increment line are missing from this sampled view.
 */
526 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
528 struct ccb_trans_settings cts;
529 struct ccb_trans_settings_spi *spi;
531 memset(&cts, 0, sizeof (cts));
532 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
533 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
534 cts.type = CTS_TYPE_CURRENT_SETTINGS;
535 xpt_action((union ccb *)&cts);
536 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
538 xpt_print(periph->path,
539 "failed to get current device settings\n");
543 if (cts.transport != XPORT_SPI) {
545 xpt_print(periph->path, "not SPI transport\n");
549 spi = &cts.xport_specific.spi;
552 * We cannot renegotiate sync rate if we don't have one.
554 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
556 xpt_print(periph->path, "no sync rate known\n");
562 * We'll assert that we don't have to touch PPR options- the
563 * SIM will see what we do with period and offset and adjust
564 * the PPR options as appropriate.
568 * A sync rate with unknown or zero offset is nonsensical.
569 * A sync period of zero means Async.
571 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
572 || spi->sync_offset == 0 || spi->sync_period == 0) {
574 xpt_print(periph->path, "no sync rate available\n");
579 if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
580 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
581 ("hit async: giving up on DV\n"));
587 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
588 * We don't try to remember 'last' settings to see if the SIM actually
589 * gets into the speed we want to set. We check on the SIM telling
590 * us that a requested speed is bad, but otherwise don't try and
591 * check the speed due to the asynchronous and handshake nature
594 spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
597 if (spi->sync_period >= 0xf) {
598 spi->sync_period = 0;
599 spi->sync_offset = 0;
600 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
601 ("setting to async for DV\n"));
603 * Once we hit async, we don't want to try
606 device->flags |= CAM_DEV_DV_HIT_BOTTOM;
607 } else if (bootverbose) {
608 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
609 ("DV: period 0x%x\n", spi->sync_period));
610 printf("setting period to 0x%x\n", spi->sync_period);
612 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
613 cts.type = CTS_TYPE_CURRENT_SETTINGS;
614 xpt_action((union ccb *)&cts);
615 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
618 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
619 ("DV: failed to set period 0x%x\n", spi->sync_period));
620 if (spi->sync_period == 0) {
/*
 * Completion handler for probe CCBs: advances the probe state machine
 * based on softc->action and the completed CCB's status. Handles the
 * reset-signature decode, IDENTIFY post-processing (byte order, trims,
 * device-change MD5 check, serial number capture), transfer-mode and
 * inquiry completion, and the port-multiplier enumeration sequence
 * (PID/PRV/PORTS/RESET/CONNECT/CHECK/CLEAR). At the end, completes the
 * original request CCB(s) and either re-schedules or tears down the
 * periph. NOTE(review): sampled listing — case labels, break/return
 * statements and several lines are missing from view; error paths all
 * follow the cam_periph_error()/xpt_release_devq() pattern.
 */
628 probedone(struct cam_periph *periph, union ccb *done_ccb)
630 struct ata_params *ident_buf;
632 struct cam_path *path;
636 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
638 softc = (probe_softc *)periph->softc;
639 path = done_ccb->ccb_h.path;
640 priority = done_ccb->ccb_h.pinfo.priority;
641 ident_buf = &path->device->ident_data;
643 switch (softc->action) {
/* PROBE_RESET completion (label not in view): decode the signature. */
645 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
646 int sign = (done_ccb->ataio.res.lba_high << 8) +
647 done_ccb->ataio.res.lba_mid;
648 xpt_print(path, "SIGNATURE: %04x\n", sign);
/* 0x0000 = ATA disk, 0x9669 = port multiplier, 0xeb14 = ATAPI. */
649 if (sign == 0x0000 &&
650 done_ccb->ccb_h.target_id != 15) {
651 path->device->protocol = PROTO_ATA;
652 PROBE_SET_ACTION(softc, PROBE_IDENTIFY);
653 } else if (sign == 0x9669 &&
654 done_ccb->ccb_h.target_id == 15) {
655 struct ccb_trans_settings cts;
657 /* Report SIM that PM is present. */
658 bzero(&cts, sizeof(cts));
659 xpt_setup_ccb(&cts.ccb_h, path, 1);
660 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
661 cts.type = CTS_TYPE_CURRENT_SETTINGS;
662 cts.xport_specific.sata.pm_present = 1;
663 cts.xport_specific.sata.valid = CTS_SATA_VALID_PM;
664 xpt_action((union ccb *)&cts);
665 path->device->protocol = PROTO_SATAPM;
666 PROBE_SET_ACTION(softc, PROBE_PM_PID);
667 } else if (sign == 0xeb14 &&
668 done_ccb->ccb_h.target_id != 15) {
669 path->device->protocol = PROTO_SCSI;
670 PROBE_SET_ACTION(softc, PROBE_IDENTIFY);
672 if (done_ccb->ccb_h.target_id != 15) {
674 "Unexpected signature 0x%04x\n", sign);
676 xpt_release_ccb(done_ccb);
679 xpt_release_ccb(done_ccb);
680 xpt_schedule(periph, priority);
682 } else if (cam_periph_error(done_ccb, 0, 0,
683 &softc->saved_ccb) == ERESTART) {
685 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
686 /* Don't wedge the queue */
687 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
/* PROBE_IDENTIFY completion (label not in view). */
693 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
/* IDENTIFY data arrives little-endian; convert in place. */
696 for (ptr = (int16_t *)ident_buf;
697 ptr < (int16_t *)ident_buf + sizeof(struct ata_params)/2; ptr++) {
698 *ptr = le16toh(*ptr);
/* These vendors report strings unswapped; skip the byte swap. */
700 if (strncmp(ident_buf->model, "FX", 2) &&
701 strncmp(ident_buf->model, "NEC", 3) &&
702 strncmp(ident_buf->model, "Pioneer", 7) &&
703 strncmp(ident_buf->model, "SHARP", 5)) {
704 ata_bswap(ident_buf->model, sizeof(ident_buf->model));
705 ata_bswap(ident_buf->revision, sizeof(ident_buf->revision));
706 ata_bswap(ident_buf->serial, sizeof(ident_buf->serial));
708 ata_btrim(ident_buf->model, sizeof(ident_buf->model));
709 ata_bpack(ident_buf->model, ident_buf->model, sizeof(ident_buf->model));
710 ata_btrim(ident_buf->revision, sizeof(ident_buf->revision));
711 ata_bpack(ident_buf->revision, ident_buf->revision, sizeof(ident_buf->revision));
712 ata_btrim(ident_buf->serial, sizeof(ident_buf->serial));
713 ata_bpack(ident_buf->serial, ident_buf->serial, sizeof(ident_buf->serial));
715 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
716 /* Check that it is the same device. */
/* Recompute the MD5 of model/revision/serial and compare with the
 * digest saved in probestart(); mismatch means device changed. */
722 (unsigned char *)ident_buf->model,
723 sizeof(ident_buf->model));
725 (unsigned char *)ident_buf->revision,
726 sizeof(ident_buf->revision));
728 (unsigned char *)ident_buf->serial,
729 sizeof(ident_buf->serial));
730 MD5Final(digest, &context);
731 if (bcmp(digest, softc->digest, sizeof(digest))) {
732 /* Device changed. */
733 xpt_async(AC_LOST_DEVICE, path, NULL);
735 xpt_release_ccb(done_ccb);
739 /* Clean up from previous instance of this device */
740 if (path->device->serial_num != NULL) {
741 free(path->device->serial_num, M_CAMXPT);
742 path->device->serial_num = NULL;
743 path->device->serial_num_len = 0;
745 path->device->serial_num =
746 (u_int8_t *)malloc((sizeof(ident_buf->serial) + 1),
748 if (path->device->serial_num != NULL) {
749 bcopy(ident_buf->serial,
750 path->device->serial_num,
751 sizeof(ident_buf->serial));
752 path->device->serial_num[sizeof(ident_buf->serial)]
754 path->device->serial_num_len =
755 strlen(path->device->serial_num);
758 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
760 scsi_find_quirk(path->device);
761 ata_device_transport(path);
763 PROBE_SET_ACTION(softc, PROBE_SETMODE);
764 xpt_release_ccb(done_ccb);
765 xpt_schedule(periph, priority);
767 } else if (cam_periph_error(done_ccb, 0, 0,
768 &softc->saved_ccb) == ERESTART) {
770 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
771 /* Don't wedge the queue */
772 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
777 * If we get to this point, we got an error status back
778 * from the inquiry and the error status doesn't require
779 * automatically retrying the command. Therefore, the
780 * inquiry failed. If we had inquiry information before
781 * for this device, but this latest inquiry command failed,
782 * the device has probably gone away. If this device isn't
783 * already marked unconfigured, notify the peripheral
784 * drivers that this device is no more.
786 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
787 /* Send the async notification. */
788 xpt_async(AC_LOST_DEVICE, path, NULL);
790 xpt_release_ccb(done_ccb);
/* PROBE_SETMODE completion (label not in view). */
795 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
796 if (path->device->protocol == PROTO_ATA) {
/* ATA disk: probe finished — announce the device. */
797 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
798 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
799 xpt_action(done_ccb);
800 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
802 xpt_release_ccb(done_ccb);
/* ATAPI: continue with SCSI INQUIRY. */
805 PROBE_SET_ACTION(softc, PROBE_INQUIRY);
806 xpt_release_ccb(done_ccb);
807 xpt_schedule(periph, priority);
810 } else if (cam_periph_error(done_ccb, 0, 0,
811 &softc->saved_ccb) == ERESTART) {
813 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
814 /* Don't wedge the queue */
815 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
821 case PROBE_FULL_INQUIRY:
823 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
824 struct scsi_inquiry_data *inq_buf;
825 u_int8_t periph_qual;
827 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
828 inq_buf = &path->device->inq_data;
830 periph_qual = SID_QUAL(inq_buf);
832 if (periph_qual == SID_QUAL_LU_CONNECTED) {
836 * We conservatively request only
837 * SHORT_INQUIRY_LEN bytes of inquiry
838 * information during our first try
839 * at sending an INQUIRY. If the device
840 * has more information to give,
841 * perform a second request specifying
842 * the amount of information the device
843 * is willing to give.
845 len = inq_buf->additional_length
846 + offsetof(struct scsi_inquiry_data,
847 additional_length) + 1;
848 if (softc->action == PROBE_INQUIRY
849 && len > SHORT_INQUIRY_LENGTH) {
850 PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY);
851 xpt_release_ccb(done_ccb);
852 xpt_schedule(periph, priority);
856 scsi_find_quirk(path->device);
858 // scsi_devise_transport(path);
859 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
860 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
861 xpt_action(done_ccb);
862 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
864 xpt_release_ccb(done_ccb);
867 } else if (cam_periph_error(done_ccb, 0, 0,
868 &softc->saved_ccb) == ERESTART) {
870 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
871 /* Don't wedge the queue */
872 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
/* PROBE_PM_PID completion (label not in view). */
878 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
879 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0)
880 bzero(ident_buf, sizeof(*ident_buf));
/* Assemble the 32-bit PM product ID from the result registers. */
881 softc->pm_pid = (done_ccb->ataio.res.lba_high << 24) +
882 (done_ccb->ataio.res.lba_mid << 16) +
883 (done_ccb->ataio.res.lba_low << 8) +
884 done_ccb->ataio.res.sector_count;
885 printf("PM Product ID: %08x\n", softc->pm_pid);
886 snprintf(ident_buf->model, sizeof(ident_buf->model),
887 "Port Multiplier %08x", softc->pm_pid);
888 PROBE_SET_ACTION(softc, PROBE_PM_PRV);
889 xpt_release_ccb(done_ccb);
890 xpt_schedule(periph, priority);
892 } else if (cam_periph_error(done_ccb, 0, 0,
893 &softc->saved_ccb) == ERESTART) {
895 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
896 /* Don't wedge the queue */
897 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
/* PROBE_PM_PRV completion (label not in view). */
902 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
903 softc->pm_prv = (done_ccb->ataio.res.lba_high << 24) +
904 (done_ccb->ataio.res.lba_mid << 16) +
905 (done_ccb->ataio.res.lba_low << 8) +
906 done_ccb->ataio.res.sector_count;
907 printf("PM Revision: %08x\n", softc->pm_prv);
908 snprintf(ident_buf->revision, sizeof(ident_buf->revision),
909 "%04x", softc->pm_prv);
910 PROBE_SET_ACTION(softc, PROBE_PM_PORTS);
911 xpt_release_ccb(done_ccb);
912 xpt_schedule(periph, priority);
914 } else if (cam_periph_error(done_ccb, 0, 0,
915 &softc->saved_ccb) == ERESTART) {
917 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
918 /* Don't wedge the queue */
919 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
/* PROBE_PM_PORTS completion (label not in view). */
924 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
925 softc->pm_ports = (done_ccb->ataio.res.lba_high << 24) +
926 (done_ccb->ataio.res.lba_mid << 16) +
927 (done_ccb->ataio.res.lba_low << 8) +
928 done_ccb->ataio.res.sector_count;
929 /* This PM declares 6 ports, while only 5 of them are real.
930 * Port 5 is enclosure management bridge port, which has implementation
931 * problems, causing probe faults. Hide it for now. */
932 if (softc->pm_pid == 0x37261095 && softc->pm_ports == 6)
934 /* This PM declares 7 ports, while only 5 of them are real.
935 * Port 5 is some fake "Config Disk" with 640 sectors size,
936 * port 6 is enclosure management bridge port.
937 * Both fake ports has implementation problems, causing
938 * probe faults. Hide them for now. */
939 if (softc->pm_pid == 0x47261095 && softc->pm_ports == 7)
941 printf("PM ports: %d\n", softc->pm_ports);
942 ident_buf->config = softc->pm_ports;
943 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
945 PROBE_SET_ACTION(softc, PROBE_PM_RESET);
946 xpt_release_ccb(done_ccb);
947 xpt_schedule(periph, priority);
949 } else if (cam_periph_error(done_ccb, 0, 0,
950 &softc->saved_ccb) == ERESTART) {
952 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
953 /* Don't wedge the queue */
954 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
/* PROBE_PM_RESET completion (label not in view): loop over ports. */
959 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
961 if (softc->pm_step < softc->pm_ports) {
962 xpt_release_ccb(done_ccb);
963 xpt_schedule(periph, priority);
968 printf("PM reset done\n");
969 PROBE_SET_ACTION(softc, PROBE_PM_CONNECT);
970 xpt_release_ccb(done_ccb);
971 xpt_schedule(periph, priority);
974 } else if (cam_periph_error(done_ccb, 0, 0,
975 &softc->saved_ccb) == ERESTART) {
977 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
978 /* Don't wedge the queue */
979 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
983 case PROBE_PM_CONNECT:
984 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
986 if (softc->pm_step < softc->pm_ports) {
987 xpt_release_ccb(done_ccb);
988 xpt_schedule(periph, priority);
993 printf("PM connect done\n");
994 PROBE_SET_ACTION(softc, PROBE_PM_CHECK);
995 xpt_release_ccb(done_ccb);
996 xpt_schedule(periph, priority);
999 } else if (cam_periph_error(done_ccb, 0, 0,
1000 &softc->saved_ccb) == ERESTART) {
1002 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1003 /* Don't wedge the queue */
1004 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1008 case PROBE_PM_CHECK:
1009 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1010 int res = (done_ccb->ataio.res.lba_high << 24) +
1011 (done_ccb->ataio.res.lba_mid << 16) +
1012 (done_ccb->ataio.res.lba_low << 8) +
1013 done_ccb->ataio.res.sector_count;
/* SStatus DET==3 and SPD!=0 means a device is present on the port. */
1014 if ((res & 0xf0f) == 0x103 && (res & 0x0f0) != 0) {
1015 printf("PM status: %d - %08x\n", softc->pm_step, res);
1016 ident_buf->cylinders |= (1 << softc->pm_step);
/* Otherwise retry up to 100 times before declaring the port empty. */
1019 if (softc->pm_try < 100) {
1023 printf("PM status: %d - %08x\n", softc->pm_step, res);
1024 ident_buf->cylinders &= ~(1 << softc->pm_step);
1028 if (softc->pm_step < softc->pm_ports) {
1029 xpt_release_ccb(done_ccb);
1030 xpt_schedule(periph, priority);
1034 PROBE_SET_ACTION(softc, PROBE_PM_CLEAR);
1035 xpt_release_ccb(done_ccb);
1036 xpt_schedule(periph, priority);
1039 } else if (cam_periph_error(done_ccb, 0, 0,
1040 &softc->saved_ccb) == ERESTART) {
1042 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1043 /* Don't wedge the queue */
1044 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1048 case PROBE_PM_CLEAR:
1049 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1051 if (softc->pm_step < softc->pm_ports) {
1052 xpt_release_ccb(done_ccb);
1053 xpt_schedule(periph, priority);
/* Bit 15 marks the PM control port itself as "found". */
1056 found = ident_buf->cylinders | 0x8000;
1057 if (path->device->flags & CAM_DEV_UNCONFIGURED) {
1058 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
1059 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
1060 xpt_action(done_ccb);
1061 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
1063 xpt_release_ccb(done_ccb);
1066 } else if (cam_periph_error(done_ccb, 0, 0,
1067 &softc->saved_ccb) == ERESTART) {
1069 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1070 /* Don't wedge the queue */
1071 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
1076 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_INFO,
1077 ("probedone: invalid action state\n"));
/* Complete the queued probe request CCB, handing back 'found'. */
1081 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
1082 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
1083 done_ccb->ccb_h.status = CAM_REQ_CMP;
1084 done_ccb->ccb_h.ppriv_field1 = found;
/* No more pending requests: tear the probe periph down. */
1086 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
1087 cam_periph_invalidate(periph);
1088 cam_periph_release_locked(periph);
1090 probeschedule(periph);
/* Periph destructor: release the probe softc allocated in proberegister(). */
1095 probecleanup(struct cam_periph *periph)
1097 free(periph->softc, M_CAMXPT);
/*
 * Match the device's INQUIRY data against scsi_quirk_table and attach
 * the winning entry, copying its tag-depth limits onto the device.
 * Panics if nothing matches — the wildcard entry must always hit.
 */
1101 scsi_find_quirk(struct cam_ed *device)
1103 struct scsi_quirk_entry *quirk;
1106 match = cam_quirkmatch((caddr_t)&device->inq_data,
1107 (caddr_t)scsi_quirk_table,
1108 sizeof(scsi_quirk_table) /
1109 sizeof(*scsi_quirk_table),
1110 sizeof(*scsi_quirk_table), scsi_inquiry_match);
1113 panic("xpt_find_quirk: device didn't match wildcard entry!!");
1115 quirk = (struct scsi_quirk_entry *)match;
1116 device->quirk = quirk;
1117 device->mintags = quirk->mintags;
1118 device->maxtags = quirk->maxtags;
/*
 * Per-scan state carried across ata_scan_bus() reentries via
 * ppriv_ptr0. NOTE(review): struct keyword/opening brace and some
 * fields (found, counter) are missing from this sampled view.
 */
1122 union ccb *request_ccb;
1123 struct ccb_pathinq *cpi;
1126 } ata_scan_bus_info;
1129 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
1130 * As the scan progresses, xpt_scan_bus is used as the
1131 * callback on completion function.
/*
 * Bus scan driver, reentered as the completion callback of each
 * per-target XPT_SCAN_LUN. On XPT_SCAN_BUS: query path capabilities,
 * allocate scan state, and (if the HBA supports a SATA PM) probe
 * target 15 first. On reentry: record the PM result, advance the
 * target counter (wrapping 15 -> 0), and either finish the request or
 * issue the next XPT_SCAN_LUN. Targets absent from the PM 'found'
 * bitmap get AC_LOST_DEVICE instead of a probe.
 * NOTE(review): sampled listing — case labels, returns and some lines
 * are missing from view.
 */
1134 ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
1136 struct cam_path *path;
1137 ata_scan_bus_info *scan_info;
1138 union ccb *work_ccb;
1141 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
1142 ("xpt_scan_bus\n"));
1143 switch (request_ccb->ccb_h.func_code) {
1145 /* Find out the characteristics of the bus */
1146 work_ccb = xpt_alloc_ccb_nowait();
1147 if (work_ccb == NULL) {
1148 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1149 xpt_done(request_ccb);
1152 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
1153 request_ccb->ccb_h.pinfo.priority);
1154 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
1155 xpt_action(work_ccb);
1156 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
1157 request_ccb->ccb_h.status = work_ccb->ccb_h.status;
1158 xpt_free_ccb(work_ccb);
1159 xpt_done(request_ccb);
1163 /* Save some state for use while we probe for devices */
1164 scan_info = (ata_scan_bus_info *)
1165 malloc(sizeof(ata_scan_bus_info), M_CAMXPT, M_NOWAIT);
1166 if (scan_info == NULL) {
1167 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1168 xpt_done(request_ccb);
1171 scan_info->request_ccb = request_ccb;
1172 scan_info->cpi = &work_ccb->cpi;
/* Default bitmap: target 0 and the PM control target (bit 15). */
1173 scan_info->found = 0x8001;
1174 scan_info->counter = 0;
1175 /* If PM supported, probe it first. */
1176 if (scan_info->cpi->hba_inquiry & PI_SATAPM)
1177 scan_info->counter = 15;
1179 work_ccb = xpt_alloc_ccb_nowait();
1180 if (work_ccb == NULL) {
1181 free(scan_info, M_CAMXPT);
1182 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1183 xpt_done(request_ccb);
/* Reentry path (XPT_SCAN_LUN completion; label not in view). */
1188 work_ccb = request_ccb;
1189 /* Reuse the same CCB to query if a device was really found */
1190 scan_info = (ata_scan_bus_info *)work_ccb->ccb_h.ppriv_ptr0;
1191 /* Free the current request path- we're done with it. */
1192 xpt_free_path(work_ccb->ccb_h.path);
1193 /* If there is PM... */
1194 if (scan_info->counter == 15) {
1195 if (work_ccb->ccb_h.ppriv_field1 != 0) {
1196 /* Save PM probe result. */
1197 scan_info->found = work_ccb->ccb_h.ppriv_field1;
1199 struct ccb_trans_settings cts;
1201 /* Report SIM that PM is absent. */
1202 bzero(&cts, sizeof(cts));
1203 xpt_setup_ccb(&cts.ccb_h,
1204 scan_info->request_ccb->ccb_h.path, 1);
1205 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
1206 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1207 cts.xport_specific.sata.pm_present = 0;
1208 cts.xport_specific.sata.valid = CTS_SATA_VALID_PM;
1209 xpt_action((union ccb *)&cts);
1213 /* Take next device. Wrap from 15 (PM) to 0. */
1214 scan_info->counter = (scan_info->counter + 1 ) & 0x0f;
1215 if (scan_info->counter >= scan_info->cpi->max_target+1) {
/* All targets done: free resources and complete the scan CCB. */
1216 xpt_free_ccb(work_ccb);
1217 xpt_free_ccb((union ccb *)scan_info->cpi);
1218 request_ccb = scan_info->request_ccb;
1219 free(scan_info, M_CAMXPT);
1220 request_ccb->ccb_h.status = CAM_REQ_CMP;
1221 xpt_done(request_ccb);
1225 status = xpt_create_path(&path, xpt_periph,
1226 scan_info->request_ccb->ccb_h.path_id,
1227 scan_info->counter, 0);
1228 if (status != CAM_REQ_CMP) {
1229 printf("xpt_scan_bus: xpt_create_path failed"
1230 " with status %#x, bus scan halted\n",
1232 xpt_free_ccb(work_ccb);
1233 xpt_free_ccb((union ccb *)scan_info->cpi);
1234 request_ccb = scan_info->request_ccb;
1235 free(scan_info, M_CAMXPT);
1236 request_ccb->ccb_h.status = status;
1237 xpt_done(request_ccb);
/* PM said nothing is on this port: report it lost, don't probe. */
1240 if ((scan_info->found & (1 << scan_info->counter)) == 0) {
1241 xpt_async(AC_LOST_DEVICE, path, NULL);
1242 xpt_free_path(path);
1245 xpt_setup_ccb(&work_ccb->ccb_h, path,
1246 scan_info->request_ccb->ccb_h.pinfo.priority);
1247 work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
1248 work_ccb->ccb_h.cbfcnp = ata_scan_bus;
1249 work_ccb->ccb_h.ppriv_ptr0 = scan_info;
1250 work_ccb->crcn.flags = scan_info->request_ccb->crcn.flags;
1251 xpt_action(work_ccb);
/*
 * Scan a single LUN: verify the path is usable, allocate a request CCB
 * and path if the caller didn't supply one (self-initiated rescan,
 * completed via xptscandone), then either queue the request onto an
 * existing "aprobe" periph for this path or allocate a new one.
 * NOTE(review): sampled listing — returns and some cam_periph_alloc()
 * argument lines are missing from view.
 */
1259 ata_scan_lun(struct cam_periph *periph, struct cam_path *path,
1260 cam_flags flags, union ccb *request_ccb)
1262 struct ccb_pathinq cpi;
1264 struct cam_path *new_path;
1265 struct cam_periph *old_periph;
1267 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
1268 ("xpt_scan_lun\n"));
1270 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1271 cpi.ccb_h.func_code = XPT_PATH_INQ;
1272 xpt_action((union ccb *)&cpi);
1274 if (cpi.ccb_h.status != CAM_REQ_CMP) {
1275 if (request_ccb != NULL) {
1276 request_ccb->ccb_h.status = cpi.ccb_h.status;
1277 xpt_done(request_ccb);
/* No caller-supplied CCB: build our own request and path. */
1282 if (request_ccb == NULL) {
1283 request_ccb = malloc(sizeof(union ccb), M_CAMXPT, M_NOWAIT);
1284 if (request_ccb == NULL) {
1285 xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
1286 "can't continue\n");
1289 new_path = malloc(sizeof(*new_path), M_CAMXPT, M_NOWAIT);
1290 if (new_path == NULL) {
1291 xpt_print(path, "xpt_scan_lun: can't allocate path, "
1292 "can't continue\n");
1293 free(request_ccb, M_CAMXPT);
1296 status = xpt_compile_path(new_path, xpt_periph,
1298 path->target->target_id,
1299 path->device->lun_id);
1301 if (status != CAM_REQ_CMP) {
1302 xpt_print(path, "xpt_scan_lun: can't compile path, "
1303 "can't continue\n");
1304 free(request_ccb, M_CAMXPT);
1305 free(new_path, M_CAMXPT);
1308 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
1309 request_ccb->ccb_h.cbfcnp = xptscandone;
1310 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
1311 request_ccb->crcn.flags = flags;
/* Probe periph already exists: just queue this request to it. */
1314 if ((old_periph = cam_periph_find(path, "aprobe")) != NULL) {
1317 softc = (probe_softc *)old_periph->softc;
1318 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
1321 status = cam_periph_alloc(proberegister, NULL, probecleanup,
1322 probestart, "aprobe",
1324 request_ccb->ccb_h.path, NULL, 0,
1327 if (status != CAM_REQ_CMP) {
1328 xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
1329 "returned an error, can't continue probe\n");
1330 request_ccb->ccb_h.status = status;
1331 xpt_done(request_ccb);
/*
 * xptscandone: completion callback for scans started by ata_scan_lun()
 * without a caller-supplied CCB.  Releases and frees the private path
 * and the CCB that ata_scan_lun() malloc'ed.
 */
1337 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
1339 xpt_release_path(done_ccb->ccb_h.path);
/* Both the path and the CCB were allocated with M_CAMXPT. */
1340 free(done_ccb->ccb_h.path, M_CAMXPT);
1341 free(done_ccb, M_CAMXPT);
/*
 * ata_alloc_device: allocate a device node for (bus, target, lun_id),
 * seed it with the catch-all quirk entry and cleared inquiry data,
 * insertion-sort it into the target's device list by LUN id, and (for
 * non-wildcard LUNs) initialize transport info via ata_device_transport().
 */
1344 static struct cam_ed *
1345 ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
1347 struct cam_path path;
1348 struct scsi_quirk_entry *quirk;
1349 struct cam_ed *device;
1350 struct cam_ed *cur_device;
1352 device = xpt_alloc_device(bus, target, lun_id);
1357 * Take the default quirk entry until we have inquiry
1358 * data and can determine a better quirk to use.
1360 quirk = &scsi_quirk_table[scsi_quirk_table_size - 1];
1361 device->quirk = (void *)quirk;
1362 device->mintags = quirk->mintags;
1363 device->maxtags = quirk->maxtags;
1364 bzero(&device->inq_data, sizeof(device->inq_data));
1365 device->inq_flags = 0;
1366 device->queue_flags = 0;
1367 device->serial_num = NULL;
1368 device->serial_num_len = 0;
1371 * XXX should be limited by number of CCBs this bus can
1374 bus->sim->max_ccbs += device->ccbq.devq_openings;
1375 /* Insertion sort into our target's device list */
1376 cur_device = TAILQ_FIRST(&target->ed_entries);
1377 while (cur_device != NULL && cur_device->lun_id < lun_id)
1378 cur_device = TAILQ_NEXT(cur_device, links);
1379 if (cur_device != NULL) {
1380 TAILQ_INSERT_BEFORE(cur_device, device, links);
1382 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
1384 target->generation++;
/* Wildcard LUNs have no concrete path to set transport info on. */
1385 if (lun_id != CAM_LUN_WILDCARD) {
1386 xpt_compile_path(&path,
/* Pull transport/protocol versions from the SIM for the new device. */
1391 ata_device_transport(&path);
1392 xpt_release_path(&path);
/*
 * ata_device_transport: query the SIM (XPT_PATH_INQ) for transport
 * information, record transport/protocol versions on path->device, then
 * push the chosen settings back to the controller with
 * XPT_SET_TRAN_SETTINGS.
 */
1399 ata_device_transport(struct cam_path *path)
1401 struct ccb_pathinq cpi;
1402 // struct ccb_trans_settings cts;
1403 struct scsi_inquiry_data *inq_buf;
1405 /* Get transport information from the SIM */
1406 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1407 cpi.ccb_h.func_code = XPT_PATH_INQ;
1408 xpt_action((union ccb *)&cpi);
/*
 * NOTE(review): the code that initialized inq_buf is commented out
 * below, yet inq_buf is still tested at the "inq_buf != NULL" check
 * further down -- that reads an uninitialized pointer (UB).  Likewise
 * "cts" is used at the bottom while its declaration is commented out
 * above.  Leftovers from the SCSI version of this routine; confirm
 * against the full file before cleaning up.
 */
1411 // if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
1412 // inq_buf = &path->device->inq_data;
1413 // path->device->protocol = cpi.protocol;
1414 // path->device->protocol_version =
1415 // inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
1416 path->device->transport = cpi.transport;
1417 path->device->transport_version = cpi.transport_version;
1420 * Any device not using SPI3 features should
1421 * be considered SPI2 or lower.
1423 if (inq_buf != NULL) {
1424 if (path->device->transport == XPORT_SPI
1425 && (inq_buf->spi3data & SID_SPI_MASK) == 0
1426 && path->device->transport_version > 2)
1427 path->device->transport_version = 2;
1429 struct cam_ed* otherdev;
/* Look for a sibling LUN on this target to copy versioning from. */
1431 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
1433 otherdev = TAILQ_NEXT(otherdev, links)) {
1434 if (otherdev != path->device)
1438 if (otherdev != NULL) {
1440 * Initially assume the same versioning as
1441 * prior luns for this target.
1443 path->device->protocol_version =
1444 otherdev->protocol_version;
1445 path->device->transport_version =
1446 otherdev->transport_version;
1448 /* Until we know better, opt for safty */
1449 path->device->protocol_version = 2;
1450 if (path->device->transport == XPORT_SPI)
1451 path->device->transport_version = 2;
1453 path->device->transport_version = 0;
1459 * For a device compliant with SPC-2 we should be able
1460 * to determine the transport version supported by
1461 * scrutinizing the version descriptors in the
1465 /* Tell the controller what we think */
1466 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1467 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
1468 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1469 cts.transport = path->device->transport;
1470 cts.transport_version = path->device->transport_version;
1471 cts.protocol = path->device->protocol;
1472 cts.protocol_version = path->device->protocol_version;
1473 cts.proto_specific.valid = 0;
1474 cts.xport_specific.valid = 0;
1475 xpt_action((union ccb *)&cts);
/*
 * ata_action: ATA transport dispatch for XPT CCB function codes.
 * Transfer-setting changes are sanity-checked via
 * scsi_set_transfer_settings(); bus/LUN scans are routed to
 * ata_scan_bus()/ata_scan_lun(); XPT_GET_TRAN_SETTINGS is passed
 * straight to the SIM; everything else falls back to
 * xpt_action_default().
 */
1480 ata_action(union ccb *start_ccb)
1483 switch (start_ccb->ccb_h.func_code) {
1484 case XPT_SET_TRAN_SETTINGS:
1486 scsi_set_transfer_settings(&start_ccb->cts,
1487 start_ccb->ccb_h.path->device,
1488 /*async_update*/FALSE);
1492 ata_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
1495 ata_scan_lun(start_ccb->ccb_h.path->periph,
1496 start_ccb->ccb_h.path, start_ccb->crcn.flags,
1499 case XPT_GET_TRAN_SETTINGS:
1501 struct cam_sim *sim;
/* Pass GET requests through to the SIM unmodified. */
1503 sim = start_ccb->ccb_h.path->bus->sim;
1504 (*(sim->sim_action))(sim, start_ccb);
1508 xpt_action_default(start_ccb);
/*
 * scsi_set_transfer_settings: validate and apply a transfer-settings
 * request for "device".  Fills unspecified protocol/transport fields
 * from the device's current values and clamps versions, performs SCSI
 * tag-queuing and SPI-specific sanity checks against both the
 * controller (XPT_PATH_INQ) and the current settings
 * (XPT_GET_TRAN_SETTINGS), handles the tagged<->untagged transition,
 * and finally (when async_update is FALSE) forwards the request to the
 * SIM.
 */
1514 scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
1517 struct ccb_pathinq cpi;
1518 struct ccb_trans_settings cur_cts;
1519 struct ccb_trans_settings_scsi *scsi;
1520 struct ccb_trans_settings_scsi *cur_scsi;
1521 struct cam_sim *sim;
1522 struct scsi_inquiry_data *inq_data;
1524 if (device == NULL) {
1525 cts->ccb_h.status = CAM_PATH_INVALID;
1526 xpt_done((union ccb *)cts);
/* Fill in unspecified protocol/version fields from the device. */
1530 if (cts->protocol == PROTO_UNKNOWN
1531 || cts->protocol == PROTO_UNSPECIFIED) {
1532 cts->protocol = device->protocol;
1533 cts->protocol_version = device->protocol_version;
1536 if (cts->protocol_version == PROTO_VERSION_UNKNOWN
1537 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
1538 cts->protocol_version = device->protocol_version;
/* A mismatched protocol is overridden with the device's value. */
1540 if (cts->protocol != device->protocol) {
1541 xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
1542 cts->protocol, device->protocol);
1543 cts->protocol = device->protocol;
/* Never advertise a newer protocol version than the device has. */
1546 if (cts->protocol_version > device->protocol_version) {
1548 xpt_print(cts->ccb_h.path, "Down reving Protocol "
1549 "Version from %d to %d?\n", cts->protocol_version,
1550 device->protocol_version);
1552 cts->protocol_version = device->protocol_version;
/* Same defaulting/clamping for the transport fields. */
1555 if (cts->transport == XPORT_UNKNOWN
1556 || cts->transport == XPORT_UNSPECIFIED) {
1557 cts->transport = device->transport;
1558 cts->transport_version = device->transport_version;
1561 if (cts->transport_version == XPORT_VERSION_UNKNOWN
1562 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
1563 cts->transport_version = device->transport_version;
1565 if (cts->transport != device->transport) {
1566 xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
1567 cts->transport, device->transport);
1568 cts->transport = device->transport;
1571 if (cts->transport_version > device->transport_version) {
1573 xpt_print(cts->ccb_h.path, "Down reving Transport "
1574 "Version from %d to %d?\n", cts->transport_version,
1575 device->transport_version);
1577 cts->transport_version = device->transport_version;
1580 sim = cts->ccb_h.path->bus->sim;
1583 * Nothing more of interest to do unless
1584 * this is a device connected via the
1587 if (cts->protocol != PROTO_SCSI) {
1588 if (async_update == FALSE)
1589 (*(sim->sim_action))(sim, (union ccb *)cts);
1593 inq_data = &device->inq_data;
1594 scsi = &cts->proto_specific.scsi;
/* Ask the controller what it is capable of. */
1595 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
1596 cpi.ccb_h.func_code = XPT_PATH_INQ;
1597 xpt_action((union ccb *)&cpi);
1599 /* SCSI specific sanity checking */
1600 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
1601 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
1602 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
1603 || (device->mintags == 0)) {
1605 * Can't tag on hardware that doesn't support tags,
1606 * doesn't have it enabled, or has broken tag support.
1608 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1611 if (async_update == FALSE) {
1613 * Perform sanity checking against what the
1614 * controller and device can do.
1616 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
1617 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1618 cur_cts.type = cts->type;
1619 xpt_action((union ccb *)&cur_cts);
1620 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1623 cur_scsi = &cur_cts.proto_specific.scsi;
/* Inherit the current tag-enable state when the caller left TQ unset. */
1624 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
1625 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1626 scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
1628 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
1629 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1632 /* SPI specific sanity checking */
1633 if (cts->transport == XPORT_SPI && async_update == FALSE) {
1635 struct ccb_trans_settings_spi *spi;
1636 struct ccb_trans_settings_spi *cur_spi;
1638 spi = &cts->xport_specific.spi;
1640 cur_spi = &cur_cts.xport_specific.spi;
1642 /* Fill in any gaps in what the user gave us */
1643 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
1644 spi->sync_period = cur_spi->sync_period;
1645 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
1646 spi->sync_period = 0;
1647 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
1648 spi->sync_offset = cur_spi->sync_offset;
1649 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
1650 spi->sync_offset = 0;
1651 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
1652 spi->ppr_options = cur_spi->ppr_options;
1653 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
1654 spi->ppr_options = 0;
1655 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
1656 spi->bus_width = cur_spi->bus_width;
1657 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
1659 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
1660 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
1661 spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
1663 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
1664 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
/* Force async transfers if the device or HBA lacks sync support. */
1665 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
1666 && (inq_data->flags & SID_Sync) == 0
1667 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
1668 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) {
1670 spi->sync_period = 0;
1671 spi->sync_offset = 0;
/* Clamp bus width to what both device and controller support. */
1674 switch (spi->bus_width) {
1675 case MSG_EXT_WDTR_BUS_32_BIT:
1676 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
1677 || (inq_data->flags & SID_WBus32) != 0
1678 || cts->type == CTS_TYPE_USER_SETTINGS)
1679 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
1681 /* Fall Through to 16-bit */
1682 case MSG_EXT_WDTR_BUS_16_BIT:
1683 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
1684 || (inq_data->flags & SID_WBus16) != 0
1685 || cts->type == CTS_TYPE_USER_SETTINGS)
1686 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
1687 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1690 /* Fall Through to 8-bit */
1691 default: /* New bus width?? */
1692 case MSG_EXT_WDTR_BUS_8_BIT:
1693 /* All targets can do this */
1694 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
/* Strip PPR options the controller/device combination can't do. */
1698 spi3caps = cpi.xport_specific.spi.ppr_options;
1699 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
1700 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
1701 spi3caps &= inq_data->spi3data;
1703 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
1704 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
1706 if ((spi3caps & SID_SPI_IUS) == 0)
1707 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
1709 if ((spi3caps & SID_SPI_QAS) == 0)
1710 spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
1712 /* No SPI Transfer settings are allowed unless we are wide */
1713 if (spi->bus_width == 0)
1714 spi->ppr_options = 0;
1716 if ((spi->valid & CTS_SPI_VALID_DISC)
1717 && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) {
1719 * Can't tag queue without disconnection.
1721 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1722 scsi->valid |= CTS_SCSI_VALID_TQ;
1726 * If we are currently performing tagged transactions to
1727 * this device and want to change its negotiation parameters,
1728 * go non-tagged for a bit to give the controller a chance to
1729 * negotiate unhampered by tag messages.
/*
 * NOTE(review): the mask below tests CTS_SPI_VALID_* bits against
 * spi->flags rather than spi->valid -- looks like it should be
 * spi->valid; confirm against CAM history before changing.
 */
1731 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
1732 && (device->inq_flags & SID_CmdQue) != 0
1733 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
1734 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
1735 CTS_SPI_VALID_SYNC_OFFSET|
1736 CTS_SPI_VALID_BUS_WIDTH)) != 0)
1737 scsi_toggle_tags(cts->ccb_h.path);
1740 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
1741 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
1745 * If we are transitioning from tags to no-tags or
1746 * vice-versa, we need to carefully freeze and restart
1747 * the queue so that we don't overlap tagged and non-tagged
1748 * commands. We also temporarily stop tags if there is
1749 * a change in transfer negotiation settings to allow
1750 * "tag-less" negotiation.
1752 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
1753 || (device->inq_flags & SID_CmdQue) != 0)
1754 device_tagenb = TRUE;
1756 device_tagenb = FALSE;
1758 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
1759 && device_tagenb == FALSE)
1760 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
1761 && device_tagenb == TRUE)) {
1763 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
1765 * Delay change to use tags until after a
1766 * few commands have gone to this device so
1767 * the controller has time to perform transfer
1768 * negotiations without tagged messages getting
1771 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
1772 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
1774 struct ccb_relsim crs;
/* Going untagged: freeze the devq, resize it, then release the SIMQ. */
1776 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
1777 device->inq_flags &= ~SID_CmdQue;
1778 xpt_dev_ccbq_resize(cts->ccb_h.path,
1779 sim->max_dev_openings);
1780 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
1781 device->tag_delay_count = 0;
1783 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
1785 crs.ccb_h.func_code = XPT_REL_SIMQ;
1786 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
1788 = crs.release_timeout
1791 xpt_action((union ccb *)&crs);
/* Finally hand the vetted settings to the SIM. */
1795 if (async_update == FALSE)
1796 (*(sim->sim_action))(sim, (union ccb *)cts);
/*
 * scsi_toggle_tags: momentarily turn tagged queuing off and back on for
 * the device at "path" so the controller can renegotiate transfer
 * parameters without tag messages in flight; the tag-enable command
 * delay counter then takes effect.
 */
1800 scsi_toggle_tags(struct cam_path *path)
1805 * Give controllers a chance to renegotiate
1806 * before starting tag operations. We
1807 * "toggle" tagged queuing off then on
1808 * which causes the tag enable command delay
1809 * counter to come into effect.
1812 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
1813 || ((dev->inq_flags & SID_CmdQue) != 0
1814 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
1815 struct ccb_trans_settings cts;
1817 xpt_setup_ccb(&cts.ccb_h, path, 1);
1818 cts.protocol = PROTO_SCSI;
1819 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
1820 cts.transport = XPORT_UNSPECIFIED;
1821 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
1822 cts.proto_specific.scsi.flags = 0;
1823 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
/* First pass: tags off ... */
1824 scsi_set_transfer_settings(&cts, path->device,
1825 /*async_update*/TRUE);
/* ... second pass: tags back on. */
1826 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
1827 scsi_set_transfer_settings(&cts, path->device,
1828 /*async_update*/TRUE);
1833 * Handle any per-device event notifications that require action by the XPT.
1836 ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
1837 struct cam_ed *device, void *async_arg)
1840 struct cam_path newpath;
1843 * We only need to handle events for real devices.
1845 if (target->target_id == CAM_TARGET_WILDCARD
1846 || device->lun_id == CAM_LUN_WILDCARD)
1850 * We need our own path with wildcards expanded to
1851 * handle certain types of events.
1853 if ((async_code == AC_SENT_BDR)
1854 || (async_code == AC_BUS_RESET)
1855 || (async_code == AC_INQ_CHANGED))
1856 status = xpt_compile_path(&newpath, NULL,
1861 status = CAM_REQ_CMP_ERR;
1863 if (status == CAM_REQ_CMP) {
1866 * Allow transfer negotiation to occur in a
1867 * tag free environment.
1869 if (async_code == AC_SENT_BDR
1870 || async_code == AC_BUS_RESET)
1871 scsi_toggle_tags(&newpath);
1873 if (async_code == AC_INQ_CHANGED) {
1875 * We've sent a start unit command, or
1876 * something similar to a device that
1877 * may have caused its inquiry data to
1878 * change. So we re-scan the device to
1879 * refresh the inquiry data for it.
1881 ata_scan_lun(newpath.periph, &newpath,
1882 CAM_EXPECT_INQ_CHANGE, NULL);
1884 xpt_release_path(&newpath);
1885 } else if (async_code == AC_LOST_DEVICE) {
1886 device->flags |= CAM_DEV_UNCONFIGURED;
1887 } else if (async_code == AC_TRANSFER_NEG) {
1888 struct ccb_trans_settings *settings;
1890 settings = (struct ccb_trans_settings *)async_arg;
1891 scsi_set_transfer_settings(settings, device,
1892 /*async_update*/TRUE);