2 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/endian.h>
37 #include <sys/ctype.h>
41 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
44 #include <sys/taskqueue.h>
46 #include <machine/stdarg.h>
47 #include <machine/resource.h>
48 #include <machine/bus.h>
50 #include <dev/ata/ata-all.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_xpt_sim.h>
58 #include <cam/cam_debug.h>
62 /* device structure */
63 static d_ioctl_t ata_ioctl;
64 static struct cdevsw ata_cdevsw = {
65 .d_version = D_VERSION,
66 .d_flags = D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
/* prototypes for file-local helpers defined below */
74 static void ata_boot_attach(void);
75 static device_t ata_add_child(device_t, struct ata_device *, int);
77 static void ataaction(struct cam_sim *sim, union ccb *ccb);
78 static void atapoll(struct cam_sim *sim);
80 static void ata_conn_event(void *, int);
81 static void bswap(int8_t *, int);
82 static void btrim(int8_t *, int);
83 static void bpack(int8_t *, int8_t *, int);
84 static void ata_interrupt_locked(void *data);
/* global state shared with the rest of the ATA stack */
87 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
88 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL; /* hook installed by ata-raid, if loaded */
89 struct intr_config_hook *ata_delayed_attach = NULL; /* non-NULL only during early boot */
90 devclass_t ata_devclass;
91 uma_zone_t ata_request_zone;
92 uma_zone_t ata_composite_zone;
95 int ata_dma_check_80pin = 1;
/* sysctl/tunable knobs controlling DMA negotiation and disk behavior */
98 static int ata_dma = 1;
99 static int atapi_dma = 1;
102 SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
103 TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
104 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
105 "ATA disk DMA mode control");
106 TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
107 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
108 CTLFLAG_RDTUN, &ata_dma_check_80pin, 1,
109 "Check for 80pin cable before setting ATA DMA mode");
110 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
111 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
112 "ATAPI device DMA mode control");
113 TUNABLE_INT("hw.ata.wc", &ata_wc);
114 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
115 "ATA disk write caching");
116 TUNABLE_INT("hw.ata.setmax", &ata_setmax);
117 SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
118 "ATA disk set max native address");
121 * newbus device interface related functions
124 ata_probe(device_t dev)
/*
 * ata_attach() - newbus attach method for an ATA channel.
 * Initializes the channel softc (locks, request queue, conn task),
 * sets initial per-target transfer parameters, resets the channel,
 * wires up the interrupt, and registers a CAM SIM/bus/path for it.
 */
130 ata_attach(device_t dev)
132 struct ata_channel *ch = device_get_softc(dev);
135 struct cam_devq *devq;
139 /* check that we have a virgin channel to attach */
143 /* initialize the softc basics */
145 ch->state = ATA_IDLE;
146 bzero(&ch->state_mtx, sizeof(struct mtx));
147 mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
148 bzero(&ch->queue_mtx, sizeof(struct mtx));
149 mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
150 TAILQ_INIT(&ch->ata_queue);
151 TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
/* default per-target settings: SATA gets 8K transfers, PATA MAXPHYS */
153 for (i = 0; i < 16; i++) {
154 ch->user[i].mode = 0;
155 if (ch->flags & ATA_SATA)
156 ch->user[i].bytecount = 8192;
158 ch->user[i].bytecount = MAXPHYS;
159 ch->curr[i] = ch->user[i];
163 /* reset the controller HW, the channel and device(s) */
164 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
169 ATA_LOCKING(dev, ATA_LF_UNLOCK);
171 /* allocate DMA resources if DMA HW present*/
175 /* setup interrupt delivery */
177 ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
178 RF_SHAREABLE | RF_ACTIVE);
180 device_printf(dev, "unable to allocate interrupt\n");
183 if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
184 ata_interrupt, ch, &ch->ih))) {
185 device_printf(dev, "unable to setup interrupt\n");
190 /* probe and attach devices on this channel unless we are in early boot */
191 if (!ata_delayed_attach)
195 mtx_lock(&ch->state_mtx);
196 /* Create the device queue for our SIM. */
197 devq = cam_simq_alloc(1);
199 device_printf(dev, "Unable to allocate simq\n");
203 /* Construct SIM entry */
204 ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
205 device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
206 if (ch->sim == NULL) {
207 device_printf(dev, "unable to allocate sim\n");
211 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
212 device_printf(dev, "unable to register xpt bus\n");
216 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
217 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
218 device_printf(dev, "unable to create path\n");
222 mtx_unlock(&ch->state_mtx);
/* error unwind: tear down CAM objects and the IRQ in reverse order */
226 xpt_bus_deregister(cam_sim_path(ch->sim));
228 cam_sim_free(ch->sim, /*free_devq*/TRUE);
230 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
231 mtx_unlock(&ch->state_mtx);
/*
 * ata_detach() - newbus detach method.
 * Stalls the request queue, deletes all child devices, drains the
 * connect task, unregisters the CAM SIM/path, and releases the IRQ,
 * DMA resources and mutexes.
 */
237 ata_detach(device_t dev)
239 struct ata_channel *ch = device_get_softc(dev);
245 /* check that we have a valid channel to detach */
249 /* grap the channel lock so no new requests gets launched */
250 mtx_lock(&ch->state_mtx);
251 ch->state |= ATA_STALL_QUEUE;
252 mtx_unlock(&ch->state_mtx);
255 /* detach & delete all children */
256 if (!device_get_children(dev, &children, &nchildren)) {
257 for (i = 0; i < nchildren; i++)
259 device_delete_child(dev, children[i]);
260 free(children, M_TEMP);
263 taskqueue_drain(taskqueue_thread, &ch->conntask);
/* tear down the CAM SIM registered in ata_attach() */
266 mtx_lock(&ch->state_mtx);
267 xpt_async(AC_LOST_DEVICE, ch->path, NULL);
268 xpt_free_path(ch->path);
269 xpt_bus_deregister(cam_sim_path(ch->sim));
270 cam_sim_free(ch->sim, /*free_devq*/TRUE);
271 mtx_unlock(&ch->state_mtx);
274 /* release resources */
275 bus_teardown_intr(dev, ch->r_irq, ch->ih);
276 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
279 /* free DMA resources if DMA HW present*/
283 mtx_destroy(&ch->state_mtx);
284 mtx_destroy(&ch->queue_mtx);
/*
 * ata_conn_event() - taskqueue handler for hot-plug/connection events.
 * Allocates a CCB on the channel's wildcard path so CAM can rescan
 * the bus after a device arrived or departed.
 */
289 ata_conn_event(void *context, int dummy)
291 device_t dev = (device_t)context;
293 struct ata_channel *ch = device_get_softc(dev);
296 mtx_lock(&ch->state_mtx);
298 mtx_unlock(&ch->state_mtx);
299 if ((ccb = xpt_alloc_ccb()) == NULL)
301 if (xpt_create_path(&ccb->ccb_h.path, NULL,
302 cam_sim_path(ch->sim),
303 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
/*
 * ata_reinit() - reset and reinitialize a channel after an error/timeout.
 * Stalls the queue, catches any in-flight request, resets the HW,
 * re-inits (or deletes) children, and requeues a still-good request.
 */
314 ata_reinit(device_t dev)
316 struct ata_channel *ch = device_get_softc(dev);
317 struct ata_request *request;
322 /* check that we have a valid channel to reinit */
323 if (!ch || !ch->r_irq)
327 device_printf(dev, "reiniting channel ..\n");
329 /* poll for locking the channel */
330 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
333 /* catch eventual request in ch->running */
334 mtx_lock(&ch->state_mtx);
335 if (ch->state & ATA_STALL_QUEUE) {
336 /* Recursive reinits and reinits during detach prohobited. */
337 mtx_unlock(&ch->state_mtx);
340 if ((request = ch->running))
341 callout_stop(&request->callout);
344 /* unconditionally grap the channel lock */
345 ch->state |= ATA_STALL_QUEUE;
346 mtx_unlock(&ch->state_mtx);
348 /* reset the controller HW, the channel and device(s) */
351 /* reinit the children and delete any that fails */
352 if (!device_get_children(dev, &children, &nchildren)) {
353 mtx_lock(&Giant); /* newbus suckage it needs Giant */
354 for (i = 0; i < nchildren; i++) {
355 /* did any children go missing ? */
356 if (children[i] && device_is_attached(children[i]) &&
357 ATA_REINIT(children[i])) {
359 * if we had a running request and its device matches
360 * this child we need to inform the request that the
363 if (request && request->dev == children[i]) {
364 request->result = ENXIO;
365 device_printf(request->dev, "FAILURE - device detached\n");
367 /* if not timeout finish request here */
368 if (!(request->flags & ATA_R_TIMEOUT))
372 device_delete_child(dev, children[i]);
375 free(children, M_TEMP);
376 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
379 /* if we still have a good request put it on the queue again */
380 if (request && !(request->flags & ATA_R_TIMEOUT)) {
381 device_printf(request->dev,
382 "WARNING - %s requeued due to channel reset",
383 ata_cmd2str(request));
384 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
385 printf(" LBA=%ju", request->u.ata.lba);
387 request->flags |= ATA_R_REQUEUE;
388 ata_queue_request(request);
391 /* we're done release the channel for new work */
392 mtx_lock(&ch->state_mtx);
393 ch->state = ATA_IDLE;
394 mtx_unlock(&ch->state_mtx);
395 ATA_LOCKING(dev, ATA_LF_UNLOCK);
397 /* Add new children. */
398 /* ata_identify(dev); */
401 device_printf(dev, "reinit done ..\n");
403 /* kick off requests on the queue */
/*
 * CAM variant of the reset path: freeze the SIM, fail the running
 * request with ERESTART so CAM requeues it, then announce the reset.
 */
406 xpt_freeze_simq(ch->sim, 1);
407 if ((request = ch->running)) {
409 if (ch->state == ATA_ACTIVE)
410 ch->state = ATA_IDLE;
411 callout_stop(&request->callout);
413 ch->dma.unload(request);
414 request->result = ERESTART;
415 ata_cam_end_transaction(dev, request);
417 /* reset the controller HW, the channel and device(s) */
419 /* Tell the XPT about the event */
420 xpt_async(AC_BUS_RESET, ch->path, NULL);
421 xpt_release_simq(ch->sim, TRUE);
/*
 * ata_suspend() - power-management suspend method.
 * Waits (polling with msleep/tsleep) for the channel to go IDLE
 * before allowing the system to suspend.
 */
427 ata_suspend(device_t dev)
429 struct ata_channel *ch;
431 /* check for valid device */
432 if (!dev || !(ch = device_get_softc(dev)))
436 mtx_lock(&ch->state_mtx);
437 xpt_freeze_simq(ch->sim, 1);
438 while (ch->state != ATA_IDLE)
439 msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
440 mtx_unlock(&ch->state_mtx);
442 /* wait for the channel to be IDLE or detached before suspending */
444 mtx_lock(&ch->state_mtx);
445 if (ch->state == ATA_IDLE) {
446 ch->state = ATA_ACTIVE;
447 mtx_unlock(&ch->state_mtx);
450 mtx_unlock(&ch->state_mtx);
451 tsleep(ch, PRIBIO, "atasusp", hz/10);
453 ATA_LOCKING(dev, ATA_LF_UNLOCK);
/*
 * ata_resume() - power-management resume method.
 * Reinitializes the channel (device modes are unknown after resume)
 * and releases the SIM queue frozen by ata_suspend().
 */
459 ata_resume(device_t dev)
461 struct ata_channel *ch;
464 /* check for valid device */
465 if (!dev || !(ch = device_get_softc(dev)))
469 mtx_lock(&ch->state_mtx);
470 error = ata_reinit(dev);
471 xpt_release_simq(ch->sim, TRUE);
472 mtx_unlock(&ch->state_mtx);
474 /* reinit the devices, we dont know what mode/state they are in */
475 error = ata_reinit(dev);
476 /* kick off requests on the queue */
/*
 * ata_interrupt() - interrupt handler entry; takes the channel state
 * lock and defers to ata_interrupt_locked() for the real work.
 */
483 ata_interrupt(void *data)
486 struct ata_channel *ch = (struct ata_channel *)data;
488 mtx_lock(&ch->state_mtx);
490 ata_interrupt_locked(data);
492 mtx_unlock(&ch->state_mtx);
/*
 * ata_interrupt_locked() - interrupt body.
 * Filters out interrupts not for us, finds the running request, and
 * ends the transaction when the HW reports it finished.
 * NOTE(review): ata_interrupt() already holds ch->state_mtx when it
 * calls here, yet this function also appears to mtx_lock the same
 * mutex — verify against the full source whether this lock/unlock
 * pair really belongs in this function (MTX_DEF is not recursive).
 */
497 ata_interrupt_locked(void *data)
499 struct ata_channel *ch = (struct ata_channel *)data;
500 struct ata_request *request;
503 mtx_lock(&ch->state_mtx);
506 /* ignore interrupt if its not for us */
507 if (ch->hw.status && !ch->hw.status(ch->dev))
510 /* do we have a running request */
511 if (!(request = ch->running))
514 ATA_DEBUG_RQ(request, "interrupt");
516 /* safetycheck for the right state */
517 if (ch->state == ATA_IDLE) {
518 device_printf(request->dev, "interrupt on idle channel ignored\n");
523 * we have the HW locks, so end the transaction for this request
524 * if it finishes immediately otherwise wait for next interrupt
526 if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
528 if (ch->state == ATA_ACTIVE)
529 ch->state = ATA_IDLE;
531 ata_cam_end_transaction(ch->dev, request);
533 mtx_unlock(&ch->state_mtx);
534 ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
541 mtx_unlock(&ch->state_mtx);
/* ata_print_cable() - warn that a 40-wire cable limits us to UDMA33 */
546 ata_print_cable(device_t dev, u_int8_t *who)
549 "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
/*
 * ata_check_80pin() - downgrade modes above UDMA2 when the device does
 * not report an 80-conductor cable; skipped entirely when the
 * hw.ata.ata_dma_check_80pin tunable is cleared.
 */
553 ata_check_80pin(device_t dev, int mode)
555 struct ata_device *atadev = device_get_softc(dev);
557 if (!ata_dma_check_80pin) {
559 device_printf(dev, "Skipping 80pin cable check\n");
563 if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
564 ata_print_cable(dev, "device");
/*
 * ata_setmode() - negotiate a transfer mode with controller and device.
 * Iterates limit -> controller SETMODE -> cable check until the mode
 * stabilizes, then issues SETFEATURES/SETXFER to the device.
 */
571 ata_setmode(device_t dev)
573 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
574 struct ata_device *atadev = device_get_softc(dev);
575 int error, mode, pmode;
579 pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
580 mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
581 if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
582 mode = ata_check_80pin(dev, mode);
583 } while (pmode != mode); /* Interate till successfull negotiation. */
584 error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
586 device_printf(dev, "%ssetting %s\n",
587 (error) ? "FAILURE " : "", ata_mode2str(mode));
592 * device related interfaces
/*
 * ata_ioctl() - control-device ioctl handler (/dev/ata).
 * Dispatches channel-level requests: max channel count, reinit,
 * attach/detach of a channel, device enumeration, and RAID ioctls
 * (forwarded through ata_raid_ioctl_func when ata-raid is loaded).
 */
596 ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
597 int32_t flag, struct thread *td)
599 device_t device, *children;
600 struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
601 int *value = (int *)data;
602 int i, nchildren, error = ENOTTY;
605 case IOCATAGMAXCHANNEL:
606 /* In case we have channel 0..n this will return n+1. */
607 *value = devclass_get_maxunit(ata_devclass);
/* the next three cases validate the channel unit the same way */
612 if (*value >= devclass_get_maxunit(ata_devclass) ||
613 !(device = devclass_get_device(ata_devclass, *value)) ||
614 !device_is_attached(device))
616 error = ata_reinit(device);
620 if (*value >= devclass_get_maxunit(ata_devclass) ||
621 !(device = devclass_get_device(ata_devclass, *value)) ||
622 !device_is_attached(device))
624 error = DEVICE_ATTACH(device);
628 if (*value >= devclass_get_maxunit(ata_devclass) ||
629 !(device = devclass_get_device(ata_devclass, *value)) ||
630 !device_is_attached(device))
632 error = DEVICE_DETACH(device);
636 if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
637 !(device = devclass_get_device(ata_devclass, devices->channel)) ||
638 !device_is_attached(device))
/* clear both result slots, then fill in whichever children exist */
640 bzero(devices->name[0], 32);
641 bzero(&devices->params[0], sizeof(struct ata_params));
642 bzero(devices->name[1], 32);
643 bzero(&devices->params[1], sizeof(struct ata_params));
644 if (!device_get_children(device, &children, &nchildren)) {
645 for (i = 0; i < nchildren; i++) {
646 if (children[i] && device_is_attached(children[i])) {
647 struct ata_device *atadev = device_get_softc(children[i]);
649 if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
650 strncpy(devices->name[0],
651 device_get_nameunit(children[i]), 32),
652 bcopy(&atadev->param, &devices->params[0],
653 sizeof(struct ata_params));
655 if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
656 strncpy(devices->name[1],
657 device_get_nameunit(children[i]), 32);
658 bcopy(&atadev->param, &devices->params[1],
659 sizeof(struct ata_params));
663 free(children, M_TEMP);
671 if (ata_raid_ioctl_func)
672 error = ata_raid_ioctl_func(cmd, data);
/*
 * ata_device_ioctl() - per-device ioctl handler.
 * Builds an ata_request from a userland ata_ioc_request (IOCATAREQUEST),
 * copying data in/out as requested, and also services identify-param,
 * mode get/set and spindown get/set requests.
 */
679 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
681 struct ata_device *atadev = device_get_softc(dev);
682 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
683 struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
684 struct ata_params *params = (struct ata_params *)data;
685 int *mode = (int *)data;
686 struct ata_request *request;
/* reject transfers larger than the channel's DMA engine can handle */
692 if (ioc_request->count >
693 (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
696 if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
699 if (!(request = ata_alloc_request())) {
703 request->dev = atadev->dev;
704 if (ioc_request->flags & ATA_CMD_WRITE) {
705 error = copyin(ioc_request->data, buf, ioc_request->count);
708 ata_free_request(request);
/* translate the userland request into kernel request fields */
712 if (ioc_request->flags & ATA_CMD_ATAPI) {
713 request->flags = ATA_R_ATAPI;
714 bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
717 request->u.ata.command = ioc_request->u.ata.command;
718 request->u.ata.feature = ioc_request->u.ata.feature;
719 request->u.ata.lba = ioc_request->u.ata.lba;
720 request->u.ata.count = ioc_request->u.ata.count;
722 request->timeout = ioc_request->timeout;
724 request->bytecount = ioc_request->count;
725 request->transfersize = request->bytecount;
726 if (ioc_request->flags & ATA_CMD_CONTROL)
727 request->flags |= ATA_R_CONTROL;
728 if (ioc_request->flags & ATA_CMD_READ)
729 request->flags |= ATA_R_READ;
730 if (ioc_request->flags & ATA_CMD_WRITE)
731 request->flags |= ATA_R_WRITE;
732 ata_queue_request(request);
/* copy results (sense / taskfile registers / data) back to userland */
733 if (request->flags & ATA_R_ATAPI) {
734 bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
735 sizeof(struct atapi_sense));
738 ioc_request->u.ata.command = request->u.ata.command;
739 ioc_request->u.ata.feature = request->u.ata.feature;
740 ioc_request->u.ata.lba = request->u.ata.lba;
741 ioc_request->u.ata.count = request->u.ata.count;
743 ioc_request->error = request->result;
744 if (ioc_request->flags & ATA_CMD_READ)
745 error = copyout(buf, ioc_request->data, ioc_request->count);
749 ata_free_request(request);
753 ata_getparam(atadev, 0);
754 bcopy(&atadev->param, params, sizeof(struct ata_params));
758 atadev->mode = *mode;
763 *mode = atadev->mode |
764 (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
766 case IOCATASSPINDOWN:
767 atadev->spindown = *mode;
769 case IOCATAGSPINDOWN:
770 *mode = atadev->spindown;
/*
 * ata_boot_attach() - delayed-attach hook run once during boot.
 * Kicks off identify on every channel, then removes and frees the
 * intr_config_hook that scheduled it.
 */
779 ata_boot_attach(void)
781 struct ata_channel *ch;
784 mtx_lock(&Giant); /* newbus suckage it needs Giant */
786 /* kick of probe and attach on all channels */
787 for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
788 if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
789 ata_identify(ch->dev);
793 /* release the hook that got us here, we are only needed once during boot */
794 if (ata_delayed_attach) {
795 config_intrhook_disestablish(ata_delayed_attach);
796 free(ata_delayed_attach, M_TEMP);
797 ata_delayed_attach = NULL;
800 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
805 * misc support functions
/*
 * ata_add_child() - create a newbus child for a detected device and
 * seed its softc with conservative defaults (PIO mode, 1-sector I/O).
 */
809 ata_add_child(device_t parent, struct ata_device *atadev, int unit)
813 if ((child = device_add_child(parent, NULL, unit))) {
814 device_set_softc(child, atadev);
817 atadev->max_iosize = DEV_BSIZE;
818 atadev->mode = ATA_PIO_MAX;
/*
 * ata_getparam() - issue ATA/ATAPI IDENTIFY and post-process the result.
 * Retries up to twice, byte-swaps the identify data to host order,
 * fixes vendor string endianness quirks, trims/packs the ASCII fields,
 * and (on init) chooses an initial transfer mode for the device.
 */
825 ata_getparam(struct ata_device *atadev, int init)
827 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
828 struct ata_request *request;
829 u_int8_t command = 0;
830 int error = ENOMEM, retries = 2;
/* pick the identify opcode matching the probed device class */
832 if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
833 command = ATA_ATA_IDENTIFY;
834 if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
835 command = ATA_ATAPI_IDENTIFY;
839 while (retries-- > 0 && error) {
840 if (!(request = ata_alloc_request()))
842 request->dev = atadev->dev;
843 request->timeout = 1;
844 request->retries = 0;
845 request->u.ata.command = command;
846 request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
848 request->flags |= ATA_R_QUIET;
849 request->data = (void *)&atadev->param;
850 request->bytecount = sizeof(struct ata_params);
851 request->donecount = 0;
852 request->transfersize = DEV_BSIZE;
853 ata_queue_request(request);
854 error = request->result;
855 ata_free_request(request);
/* a printable model name is our sanity check for valid identify data */
858 if (!error && (isprint(atadev->param.model[0]) ||
859 isprint(atadev->param.model[1]))) {
860 struct ata_params *atacap = &atadev->param;
/* identify data is little-endian 16-bit words; convert to host order */
863 for (ptr = (int16_t *)atacap;
864 ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
865 *ptr = le16toh(*ptr);
/* some vendors ship the ASCII fields pre-swapped; skip those */
867 if (!(!strncmp(atacap->model, "FX", 2) ||
868 !strncmp(atacap->model, "NEC", 3) ||
869 !strncmp(atacap->model, "Pioneer", 7) ||
870 !strncmp(atacap->model, "SHARP", 5))) {
871 bswap(atacap->model, sizeof(atacap->model));
872 bswap(atacap->revision, sizeof(atacap->revision));
873 bswap(atacap->serial, sizeof(atacap->serial));
875 btrim(atacap->model, sizeof(atacap->model));
876 bpack(atacap->model, atacap->model, sizeof(atacap->model));
877 btrim(atacap->revision, sizeof(atacap->revision));
878 bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
879 btrim(atacap->serial, sizeof(atacap->serial));
880 bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
883 printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
884 device_get_unit(ch->dev),
885 ata_unit2str(atadev),
886 ata_mode2str(ata_pmode(atacap)),
887 ata_mode2str(ata_wmode(atacap)),
888 ata_mode2str(ata_umode(atacap)),
889 (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
894 sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
895 device_set_desc_copy(atadev->dev, buffer);
/* ATAPI (non-CFA) devices: allow DMA only for interrupt-DRQ-free UDMA2+ */
896 if ((atadev->param.config & ATA_PROTO_ATAPI) &&
897 (atadev->param.config != ATA_CFA_MAGIC1) &&
898 (atadev->param.config != ATA_CFA_MAGIC2)) {
900 (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
901 ata_umode(&atadev->param) >= ATA_UDMA2)
902 atadev->mode = ATA_DMA_MAX;
906 (ata_umode(&atadev->param) > 0 ||
907 ata_wmode(&atadev->param) > 0))
908 atadev->mode = ATA_DMA_MAX;
/*
 * ata_identify() - enumerate devices present on a channel.
 * Skips units that already have children, creates a child per newly
 * probed unit, identifies it (PATA slave before master so the master's
 * cable detection works), and finally probes/attaches the bus.
 */
921 ata_identify(device_t dev)
923 struct ata_channel *ch = device_get_softc(dev);
924 struct ata_device *atadev;
926 device_t child, master = NULL;
927 int nchildren, i, n = ch->devices;
930 device_printf(dev, "Identifying devices: %08x\n", ch->devices);
933 /* Skip existing devices. */
934 if (!device_get_children(dev, &children, &nchildren)) {
935 for (i = 0; i < nchildren; i++) {
936 if (children[i] && (atadev = device_get_softc(children[i])))
937 n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
939 free(children, M_TEMP);
941 /* Create new devices. */
943 device_printf(dev, "New devices: %08x\n", n);
948 for (i = 0; i < ATA_PM; ++i) {
949 if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
952 if (!(atadev = malloc(sizeof(struct ata_device),
953 M_ATA, M_NOWAIT | M_ZERO))) {
954 device_printf(dev, "out of memory\n");
959 if (n & (ATA_ATA_MASTER << i))
960 unit = (device_get_unit(dev) << 1) + i;
962 if ((child = ata_add_child(dev, atadev, unit))) {
964 * PATA slave should be identified first, to allow
965 * device cable detection on master to work properly.
967 if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
968 (n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
972 if (ata_getparam(atadev, 1)) {
973 device_delete_child(dev, child);
/* identify the deferred PATA master last */
982 atadev = device_get_softc(master);
983 if (ata_getparam(atadev, 1)) {
984 device_delete_child(dev, master);
988 bus_generic_probe(dev);
989 bus_generic_attach(dev);
/*
 * ata_default_registers() - alias the read-side taskfile registers to
 * their write-side counterparts (same bus address, different meaning
 * on read vs. write in the ATA register model).
 */
996 ata_default_registers(device_t dev)
998 struct ata_channel *ch = device_get_softc(dev);
1000 /* fill in the defaults from whats setup already */
1001 ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
1002 ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
1003 ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
1004 ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
1005 ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
1006 ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
1007 ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
1008 ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
/*
 * ata_modify_if_48bit() - promote a request to the 48-bit LBA command
 * set when the address/count exceeds 28-bit limits and the device
 * supports ADDRESS48.  Controllers flagged ATA_NO_48BIT_DMA fall back
 * to PIO (MUL48/plain 48) and clear ATA_R_DMA.
 */
1012 ata_modify_if_48bit(struct ata_request *request)
1014 struct ata_channel *ch = device_get_softc(request->parent);
1015 struct ata_device *atadev = device_get_softc(request->dev);
1017 request->flags &= ~ATA_R_48BIT;
1019 if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
1020 request->u.ata.count > 256) &&
1021 atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1023 /* translate command into 48bit version */
1024 switch (request->u.ata.command) {
1026 request->u.ata.command = ATA_READ48;
1029 request->u.ata.command = ATA_READ_MUL48;
1032 if (ch->flags & ATA_NO_48BIT_DMA) {
1033 if (request->transfersize > DEV_BSIZE)
1034 request->u.ata.command = ATA_READ_MUL48;
1036 request->u.ata.command = ATA_READ48;
1037 request->flags &= ~ATA_R_DMA;
1040 request->u.ata.command = ATA_READ_DMA48;
1042 case ATA_READ_DMA_QUEUED:
1043 if (ch->flags & ATA_NO_48BIT_DMA) {
1044 if (request->transfersize > DEV_BSIZE)
1045 request->u.ata.command = ATA_READ_MUL48;
1047 request->u.ata.command = ATA_READ48;
1048 request->flags &= ~ATA_R_DMA;
1051 request->u.ata.command = ATA_READ_DMA_QUEUED48;
1054 request->u.ata.command = ATA_WRITE48;
1057 request->u.ata.command = ATA_WRITE_MUL48;
1060 if (ch->flags & ATA_NO_48BIT_DMA) {
1061 if (request->transfersize > DEV_BSIZE)
1062 request->u.ata.command = ATA_WRITE_MUL48;
1064 request->u.ata.command = ATA_WRITE48;
1065 request->flags &= ~ATA_R_DMA;
1068 request->u.ata.command = ATA_WRITE_DMA48;
1070 case ATA_WRITE_DMA_QUEUED:
1071 if (ch->flags & ATA_NO_48BIT_DMA) {
1072 if (request->transfersize > DEV_BSIZE)
1073 request->u.ata.command = ATA_WRITE_MUL48;
1075 request->u.ata.command = ATA_WRITE48;
/* NOTE(review): the next line duplicates the assignment above — the
 * second "ATA_WRITE48" store looks redundant; verify against upstream
 * (the READ_DMA_QUEUED branch has no such duplicate). */
1076 request->u.ata.command = ATA_WRITE48;
1077 request->flags &= ~ATA_R_DMA;
1080 request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
1082 case ATA_FLUSHCACHE:
1083 request->u.ata.command = ATA_FLUSHCACHE48;
1085 case ATA_SET_MAX_ADDRESS:
1086 request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1091 request->flags |= ATA_R_48BIT;
/* even small requests use 48-bit forms of these admin commands */
1093 else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1095 /* translate command into 48bit version */
1096 switch (request->u.ata.command) {
1097 case ATA_FLUSHCACHE:
1098 request->u.ata.command = ATA_FLUSHCACHE48;
1100 case ATA_READ_NATIVE_MAX_ADDRESS:
1101 request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
1103 case ATA_SET_MAX_ADDRESS:
1104 request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1109 request->flags |= ATA_R_48BIT;
/*
 * ata_udelay() - delay for 'interval' microseconds.
 * NOTE(review): the leading "1 ||" makes the condition always true,
 * so the busy-wait DELAY() path is always taken and the pause() branch
 * is dead code — intentional per the comment below, but worth a TODO.
 */
1114 ata_udelay(int interval)
1116 /* for now just use DELAY, the timer/sleep subsytems are not there yet */
1117 if (1 || interval < (1000000/hz) || ata_delayed_attach)
1120 pause("ataslp", interval/(1000000/hz));
/*
 * ata_unit2str() - render a device's unit as "portN" (behind a port
 * multiplier) or "master"/"slave" (plain PATA-style addressing).
 */
1124 ata_unit2str(struct ata_device *atadev)
1126 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
1129 if (ch->devices & ATA_PORTMULTIPLIER)
1130 sprintf(str, "port%d", atadev->unit);
1132 sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
/* ata_mode2str() - map a transfer-mode constant to its display name */
1137 ata_mode2str(int mode)
1140 case -1: return "UNSUPPORTED";
1141 case ATA_PIO0: return "PIO0";
1142 case ATA_PIO1: return "PIO1";
1143 case ATA_PIO2: return "PIO2";
1144 case ATA_PIO3: return "PIO3";
1145 case ATA_PIO4: return "PIO4";
1146 case ATA_WDMA0: return "WDMA0";
1147 case ATA_WDMA1: return "WDMA1";
1148 case ATA_WDMA2: return "WDMA2";
1149 case ATA_UDMA0: return "UDMA16";
1150 case ATA_UDMA1: return "UDMA25";
1151 case ATA_UDMA2: return "UDMA33";
1152 case ATA_UDMA3: return "UDMA40";
1153 case ATA_UDMA4: return "UDMA66";
1154 case ATA_UDMA5: return "UDMA100";
1155 case ATA_UDMA6: return "UDMA133";
1156 case ATA_SA150: return "SATA150";
1157 case ATA_SA300: return "SATA300";
/* unknown value: distinguish generic DMA from BIOS-default */
1159 if (mode & ATA_DMA_MASK)
/* ata_satarev2str() - map a SATA revision number to its link-speed name */
1167 ata_satarev2str(int rev)
1171 case 1: return "SATA 1.5Gb/s";
1172 case 2: return "SATA 3Gb/s";
1173 case 3: return "SATA 6Gb/s";
1174 case 0xff: return "SATA"; /* revision unknown, but it is SATA */
1175 default: return "???";
/* ata_atapi() - nonzero when the given target was probed as ATAPI */
1180 ata_atapi(device_t dev, int target)
1182 struct ata_channel *ch = device_get_softc(dev);
1184 return (ch->devices & (ATA_ATAPI_MASTER << target));
/*
 * ata_pmode() - best PIO mode advertised in identify data.
 * Prefers the word-64..70 advanced PIO bits; otherwise falls back to
 * the retired word-51 PIO mode field.
 */
1188 ata_pmode(struct ata_params *ap)
1190 if (ap->atavalid & ATA_FLAG_64_70) {
1191 if (ap->apiomodes & 0x02)
1193 if (ap->apiomodes & 0x01)
1196 if (ap->mwdmamodes & 0x04)
1198 if (ap->mwdmamodes & 0x02)
1200 if (ap->mwdmamodes & 0x01)
1202 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
1204 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
1206 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
/* ata_wmode() - best multiword-DMA mode advertised in identify data */
1212 ata_wmode(struct ata_params *ap)
1214 if (ap->mwdmamodes & 0x04)
1216 if (ap->mwdmamodes & 0x02)
1218 if (ap->mwdmamodes & 0x01)
/* ata_umode() - best UltraDMA mode advertised (valid only with FLAG_88) */
1224 ata_umode(struct ata_params *ap)
1226 if (ap->atavalid & ATA_FLAG_88) {
1227 if (ap->udmamodes & 0x40)
1229 if (ap->udmamodes & 0x20)
1231 if (ap->udmamodes & 0x10)
1233 if (ap->udmamodes & 0x08)
1235 if (ap->udmamodes & 0x04)
1237 if (ap->udmamodes & 0x02)
1239 if (ap->udmamodes & 0x01)
/*
 * ata_limit_mode() - clamp a requested transfer mode to both the
 * caller-supplied maximum and whatever the device actually supports
 * (UDMA first, then WDMA, then PIO).
 */
1246 ata_limit_mode(device_t dev, int mode, int maxmode)
1248 struct ata_device *atadev = device_get_softc(dev);
1250 if (maxmode && mode > maxmode)
1253 if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
1254 return min(mode, ata_umode(&atadev->param));
1256 if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
1257 return min(mode, ata_wmode(&atadev->param));
1259 if (mode > ata_pmode(&atadev->param))
1260 return min(mode, ata_pmode(&atadev->param));
/* bswap() - byte-swap a buffer in place, 16-bit word at a time */
1266 bswap(int8_t *buf, int len)
1268 u_int16_t *ptr = (u_int16_t*)(buf + len);
1270 while (--ptr >= (u_int16_t*)buf)
/* btrim() - turn NULs/underscores into spaces, then strip trailing blanks */
1275 btrim(int8_t *buf, int len)
1279 for (ptr = buf; ptr < buf+len; ++ptr)
1280 if (!*ptr || *ptr == '_')
1282 for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
/* bpack() - copy src to dst collapsing runs of spaces to one (may alias) */
1287 bpack(int8_t *src, int8_t *dst, int len)
1291 for (i = j = blank = 0 ; i < len; i++) {
1292 if (blank && src[i] == ' ') continue;
1293 if (blank && src[i] != ' ') {
1298 if (src[i] == ' ') {
/*
 * ata_cam_begin_transaction() - translate a CAM CCB into an ata_request
 * and hand it to the channel hardware.  XPT_ATA_IO CCBs map onto the
 * ATA taskfile (with optional 48-bit extension fields); anything else
 * is treated as SCSI/ATAPI with the CDB copied into the request.
 */
1311 ata_cam_begin_transaction(device_t dev, union ccb *ccb)
1313 struct ata_channel *ch = device_get_softc(dev);
1314 struct ata_request *request;
1316 if (!(request = ata_alloc_request())) {
1317 device_printf(dev, "FAILURE - out of memory in start\n");
1318 ccb->ccb_h.status = CAM_REQ_INVALID;
1322 bzero(request, sizeof(*request));
/* CAM-owned request: no softc dev, target comes from the CCB header */
1325 request->dev = NULL;
1326 request->parent = dev;
1327 request->unit = ccb->ccb_h.target_id;
1328 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1329 request->data = ccb->ataio.data_ptr;
1330 request->bytecount = ccb->ataio.dxfer_len;
1331 request->u.ata.command = ccb->ataio.cmd.command;
1332 request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
1333 (uint16_t)ccb->ataio.cmd.features;
1334 request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
1335 (uint16_t)ccb->ataio.cmd.sector_count;
1336 if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
1337 request->flags |= ATA_R_48BIT;
/* 48-bit: high LBA bytes come from the *_exp fields */
1338 request->u.ata.lba =
1339 ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
1340 ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
1341 ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
/* 28-bit: bits 24-27 live in the device register's low nibble */
1343 request->u.ata.lba =
1344 ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
1346 request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
1347 ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
1348 (uint64_t)ccb->ataio.cmd.lba_low;
1349 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1350 ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
1351 request->flags |= ATA_R_DMA;
1352 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1353 request->flags |= ATA_R_READ;
1354 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1355 request->flags |= ATA_R_WRITE;
/* SCSI/ATAPI path: copy the CDB (pointer or inline form) */
1357 request->data = ccb->csio.data_ptr;
1358 request->bytecount = ccb->csio.dxfer_len;
1359 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1360 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
1361 request->u.atapi.ccb, ccb->csio.cdb_len);
1362 request->flags |= ATA_R_ATAPI;
1363 if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1364 request->flags |= ATA_R_ATAPI16;
1365 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1366 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1367 request->flags |= ATA_R_DMA;
1368 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1369 request->flags |= ATA_R_READ;
1370 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1371 request->flags |= ATA_R_WRITE;
1373 request->transfersize = min(request->bytecount,
1374 ch->curr[ccb->ccb_h.target_id].bytecount);
1375 request->retries = 0;
1376 request->timeout = (ccb->ccb_h.timeout + 999) / 1000; /* ms -> s, round up */
1377 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1380 ch->running = request;
1381 ch->state = ATA_ACTIVE;
1382 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1384 ch->state = ATA_IDLE;
1385 ata_cam_end_transaction(dev, request);
/*
 * Complete the CAM CCB attached to a finished ATA request: translate the
 * request's completion state (timeout, device error, requeue, other failure,
 * success) into a CAM status, freeze the device queue on any non-successful
 * status, copy the ATA result registers back when requested or on error,
 * and free the request.
 *
 * NOTE(review): this chunk was extracted with lines dropped (the embedded
 * original line numbers are discontinuous), so several closing braces and
 * else-branches are missing from this view; comments below describe only
 * what the visible lines establish.
 */
1391 ata_cam_end_transaction(device_t dev, struct ata_request *request)
1393 struct ata_channel *ch = device_get_softc(dev);
1394 union ccb *ccb = request->ccb;
/* Start from a clean CAM status field before classifying the result. */
1397 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1398 if (request->flags & ATA_R_TIMEOUT) {
/* Timeout: freeze the SIM queue and hand the release back to CAM. */
1399 xpt_freeze_simq(ch->sim, 1);
1400 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1401 ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
/* Device reported an error: map to the protocol-appropriate CAM status. */
1403 } else if (request->status & ATA_S_ERROR) {
1404 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1405 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
/* (dropped else) ATAPI path: report as a SCSI check condition. */
1407 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1408 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
/* ERESTART from the lower layer means "try this CCB again". */
1410 } else if (request->result == ERESTART)
1411 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1412 else if (request->result != 0)
1413 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
/* (dropped else) no error recorded anywhere: request completed OK. */
1415 ccb->ccb_h.status |= CAM_REQ_CMP;
/* Any failure freezes the device queue exactly once (CAM_DEV_QFRZN guard). */
1416 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
1417 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1418 xpt_freeze_devq(ccb->ccb_h.path, 1);
1419 ccb->ccb_h.status |= CAM_DEV_QFRZN;
/*
 * Copy back the taskfile result registers for ATA commands when the
 * device flagged an error or the submitter asked for them explicitly.
 */
1421 if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1422 ((request->status & ATA_S_ERROR) ||
1423 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
1424 struct ata_res *res = &ccb->ataio.res;
1425 res->status = request->status;
1426 res->error = request->error;
/* Unpack the 48-bit LBA into low/mid/high and their _exp counterparts. */
1427 res->lba_low = request->u.ata.lba;
1428 res->lba_mid = request->u.ata.lba >> 8;
1429 res->lba_high = request->u.ata.lba >> 16;
/* Bits 24-27 also land in the device register (28-bit addressing form). */
1430 res->device = request->u.ata.lba >> 24;
1431 res->lba_low_exp = request->u.ata.lba >> 24;
1432 res->lba_mid_exp = request->u.ata.lba >> 32;
1433 res->lba_high_exp = request->u.ata.lba >> 40;
1434 res->sector_count = request->u.ata.count;
1435 res->sector_count_exp = request->u.ata.count >> 8;
/* The request object is owned by this layer; release it now. */
1437 ata_free_request(request);
1439 /* Do error recovery if needed. */
/*
 * Validate the target/LUN addressed by a CCB against this channel's limits:
 * at most target 1 (or only target 0 when ATA_NO_SLAVE is set), and LUN 0
 * only.  On a violation the CCB status is set to the matching CAM error.
 *
 * NOTE(review): the return statements and closing braces were dropped
 * during extraction (embedded line numbers skip 1451-1453 and 1456-1459);
 * callers use "if (ata_check_ids(dev, ccb))", which suggests a nonzero
 * return on failure — confirm against the full source.
 */
1445 ata_check_ids(device_t dev, union ccb *ccb)
1447 struct ata_channel *ch = device_get_softc(dev);
1449 if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
1450 ccb->ccb_h.status = CAM_TID_INVALID;
1454 if (ccb->ccb_h.target_lun != 0) {
1455 ccb->ccb_h.status = CAM_LUN_INVALID;
/*
 * CAM SIM action entry point for the legacy ata(4) channels: dispatch an
 * incoming CCB by function code.  Handles ATA/ATAPI I/O submission,
 * transfer-setting get/set, bus/device reset, and path inquiry; everything
 * else is rejected as CAM_REQ_INVALID.
 *
 * NOTE(review): extraction dropped many lines here (case labels, braces,
 * else keywords, xpt_done() calls — the embedded line numbers skip), so
 * control flow between the visible lines is incomplete in this view.
 */
1463 ataaction(struct cam_sim *sim, union ccb *ccb)
1466 struct ata_channel *ch;
1468 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
1469 ccb->ccb_h.func_code));
1471 ch = (struct ata_channel *)cam_sim_softc(sim);
1473 switch (ccb->ccb_h.func_code) {
1474 /* Common cases first */
1475 case XPT_ATA_IO: /* Execute the requested I/O operation */
/* Reject out-of-range target/LUN before looking at the device map. */
1477 if (ata_check_ids(dev, ccb))
/* No ATA or ATAPI device present at this target: selection timeout. */
1479 if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
1480 << ccb->ccb_h.target_id)) == 0) {
1481 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
/* (dropped context) channel already busy with a request. */
1485 device_printf(dev, "already running!\n");
/*
 * A control CCB with ATA_A_RESET is completed synchronously with a
 * fabricated signature instead of being sent to the hardware.
 */
1486 if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1487 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1488 (ccb->ataio.cmd.control & ATA_A_RESET)) {
1489 struct ata_res *res = &ccb->ataio.res;
1491 bzero(res, sizeof(*res));
1492 if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
/* (dropped else) ATAPI signature bytes in the LBA mid/high registers. */
1496 res->lba_high = 0xeb;
1497 res->lba_mid = 0x14;
1499 ccb->ccb_h.status = CAM_REQ_CMP;
/* Normal path: hand the CCB to the transaction machinery. */
1502 ata_cam_begin_transaction(dev, ccb);
1504 case XPT_EN_LUN: /* Enable LUN as a target */
1505 case XPT_TARGET_IO: /* Execute target I/O request */
1506 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
1507 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/
1508 case XPT_ABORT: /* Abort the specified CCB */
/* Target mode and abort are not supported by this SIM. */
1510 ccb->ccb_h.status = CAM_REQ_INVALID;
1512 case XPT_SET_TRAN_SETTINGS:
1514 struct ccb_trans_settings *cts = &ccb->cts;
1515 struct ata_cam_device *d;
1517 if (ata_check_ids(dev, ccb))
/* Pick the per-target settings bank: current vs. user-requested. */
1519 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1520 d = &ch->curr[ccb->ccb_h.target_id];
1522 d = &ch->user[ccb->ccb_h.target_id];
/* SATA transport: accept revision/mode/bytecount/atapi as flagged valid. */
1523 if (ch->flags & ATA_SATA) {
1524 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
1525 d->revision = cts->xport_specific.sata.revision;
1526 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
/* Current settings go through the controller (may clamp the mode). */
1527 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1528 d->mode = ATA_SETMODE(ch->dev,
1529 ccb->ccb_h.target_id,
1530 cts->xport_specific.sata.mode);
/* (dropped else) user settings are stored verbatim. */
1532 d->mode = cts->xport_specific.sata.mode;
/* SATA transfers are capped at 8 KiB per transaction here. */
1534 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
1535 d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
1536 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
1537 d->atapi = cts->xport_specific.sata.atapi;
/* (dropped else) parallel ATA transport: same pattern, no byte cap. */
1539 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
1540 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1541 d->mode = ATA_SETMODE(ch->dev,
1542 ccb->ccb_h.target_id,
1543 cts->xport_specific.ata.mode);
1545 d->mode = cts->xport_specific.ata.mode;
1547 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
1548 d->bytecount = cts->xport_specific.ata.bytecount;
1549 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
1550 d->atapi = cts->xport_specific.ata.atapi;
1552 ccb->ccb_h.status = CAM_REQ_CMP;
1555 case XPT_GET_TRAN_SETTINGS:
1557 struct ccb_trans_settings *cts = &ccb->cts;
1558 struct ata_cam_device *d;
1560 if (ata_check_ids(dev, ccb))
1562 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1563 d = &ch->curr[ccb->ccb_h.target_id];
1565 d = &ch->user[ccb->ccb_h.target_id];
1566 cts->protocol = PROTO_ATA;
1567 cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
/* Report SATA-specific settings, marking each field valid as it is filled. */
1568 if (ch->flags & ATA_SATA) {
1569 cts->transport = XPORT_SATA;
1570 cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1571 cts->xport_specific.sata.valid = 0;
1572 cts->xport_specific.sata.mode = d->mode;
1573 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
1574 cts->xport_specific.sata.bytecount = d->bytecount;
1575 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
/* Current revision comes from the hardware; 0xff means "not available". */
1576 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1577 cts->xport_specific.sata.revision =
1578 ATA_GETREV(dev, ccb->ccb_h.target_id);
1579 if (cts->xport_specific.sata.revision != 0xff) {
1580 cts->xport_specific.sata.valid |=
1581 CTS_SATA_VALID_REVISION;
/* (dropped else) user bank: report the stored revision unconditionally. */
1584 cts->xport_specific.sata.revision = d->revision;
1585 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
1587 cts->xport_specific.sata.atapi = d->atapi;
1588 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
/* (dropped else) parallel ATA transport report. */
1590 cts->transport = XPORT_ATA;
1591 cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1592 cts->xport_specific.ata.valid = 0;
1593 cts->xport_specific.ata.mode = d->mode;
1594 cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
1595 cts->xport_specific.ata.bytecount = d->bytecount;
1596 cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
1597 cts->xport_specific.ata.atapi = d->atapi;
1598 cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
1600 ccb->ccb_h.status = CAM_REQ_CMP;
1603 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1604 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
/* Reset is acknowledged as complete; the actual reset lines are dropped
 * from this view (embedded line 1605 missing) — confirm in full source. */
1606 ccb->ccb_h.status = CAM_REQ_CMP;
1608 case XPT_TERM_IO: /* Terminate the I/O process */
1610 ccb->ccb_h.status = CAM_REQ_INVALID;
1612 case XPT_PATH_INQ: /* Path routing inquiry */
1614 struct ccb_pathinq *cpi = &ccb->cpi;
1616 cpi->version_num = 1; /* XXX??? */
1617 cpi->hba_inquiry = PI_SDTR_ABLE;
1618 cpi->target_sprt = 0;
1619 cpi->hba_misc = PIM_SEQSCAN;
1620 cpi->hba_eng_cnt = 0;
/* Master only, or master+slave, depending on the channel flags. */
1621 if (ch->flags & ATA_NO_SLAVE)
1622 cpi->max_target = 0;
1624 cpi->max_target = 1;
1626 cpi->initiator_id = 0;
1627 cpi->bus_id = cam_sim_bus(sim);
/* Base speeds: SATA gen-1 (150 MB/s) vs. PIO0-era parallel ATA (3.3 MB/s). */
1628 if (ch->flags & ATA_SATA)
1629 cpi->base_transfer_speed = 150000;
1631 cpi->base_transfer_speed = 3300;
1632 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1633 strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
1634 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1635 cpi->unit_number = cam_sim_unit(sim);
1636 if (ch->flags & ATA_SATA)
1637 cpi->transport = XPORT_SATA;
1639 cpi->transport = XPORT_ATA;
1640 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
1641 cpi->protocol = PROTO_ATA;
1642 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
/* Advertise the DMA engine's limit, or DFLTPHYS when none is set. */
1643 cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
1644 cpi->ccb_h.status = CAM_REQ_CMP;
/* (dropped default label) unknown function codes are invalid. */
1648 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * CAM SIM poll callback: service the channel by invoking the interrupt
 * handler directly.  Presumably called with the channel lock held (the
 * _locked variant is used) when interrupt delivery is unavailable, e.g.
 * during kernel dumps — confirm against the SIM registration in full source.
 */
1655 atapoll(struct cam_sim *sim)
1657 struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);
1659 ata_interrupt_locked(ch);
/*
 * Module load/unload handler for the ata(4) core.  On load it creates the
 * /dev/ata control device and schedules ata_boot_attach() to run once
 * interrupts are enabled; on unload it destroys the control device.
 *
 * NOTE(review): the switch statement and its MOD_LOAD/MOD_UNLOAD case
 * labels were dropped during extraction (embedded line numbers skip), so
 * the dispatch structure is inferred from the surviving comments.
 */
1667 ata_module_event_handler(module_t mod, int what, void *arg)
1670 static struct cdev *atacdev;
1676 /* register controlling device */
1677 atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
1680 /* register boot attach to be run when interrupts are enabled */
1681 if (!(ata_delayed_attach = (struct intr_config_hook *)
1682 malloc(sizeof(struct intr_config_hook),
1683 M_TEMP, M_NOWAIT | M_ZERO))) {
1684 printf("ata: malloc of delayed attach hook failed\n");
/* Hook fires ata_boot_attach() after interrupt threads are running. */
1687 ata_delayed_attach->ich_func = (void*)ata_boot_attach;
1688 if (config_intrhook_establish(ata_delayed_attach) != 0) {
1689 printf("ata: config_intrhook_establish failed\n");
/* Establishing the hook failed: release the allocation to avoid a leak. */
1690 free(ata_delayed_attach, M_TEMP);
1698 /* deregister controlling device */
1699 destroy_dev(atacdev);
/*
 * Kernel module glue: register the "ata" module with its event handler,
 * declare module version 1, and record the dependency on the CAM module.
 */
1708 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1709 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1710 MODULE_VERSION(ata, 1);
1712 MODULE_DEPEND(ata, cam, 1, 1, 1);
/*
 * ata_init (header line dropped by extraction): create the UMA zones used
 * to allocate struct ata_request and struct ata_composite objects; run at
 * SI_SUB_DRIVERS via the SYSINIT below.
 */
1718 ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
1719 NULL, NULL, NULL, NULL, 0, 0);
1720 ata_composite_zone = uma_zcreate("ata_composite",
1721 sizeof(struct ata_composite),
1722 NULL, NULL, NULL, NULL, 0, 0);
1724 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
/*
 * ata_uninit (header line dropped by extraction): tear down the UMA zones
 * in reverse order of creation; run at shutdown via the SYSUNINIT below.
 */
1729 uma_zdestroy(ata_composite_zone);
1730 uma_zdestroy(ata_request_zone);
1732 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);