2 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
36 #include <sys/endian.h>
37 #include <sys/ctype.h>
41 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
44 #include <sys/taskqueue.h>
46 #include <machine/stdarg.h>
47 #include <machine/resource.h>
48 #include <machine/bus.h>
50 #include <dev/ata/ata-all.h>
51 #include <dev/pci/pcivar.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_debug.h>
63 /* device structure */
/*
 * Control-device switch for the ata(4) control node; ata_ioctl is the
 * ioctl entry point declared just below.
 * NOTE(review): the rest of the initializer (.d_ioctl, .d_name, closing
 * brace) is elided in this excerpt.
 */
64 static d_ioctl_t ata_ioctl;
65 static struct cdevsw ata_cdevsw = {
66 .d_version = D_VERSION,
67 .d_flags = D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
/* Forward declarations for file-local helpers defined further down. */
75 static void ata_boot_attach(void);
76 static device_t ata_add_child(device_t, struct ata_device *, int);
/* CAM SIM callbacks registered in ata_attach() via cam_sim_alloc(). */
78 static void ataaction(struct cam_sim *sim, union ccb *ccb);
79 static void atapoll(struct cam_sim *sim);
81 static void ata_conn_event(void *, int);
/* String fixups applied to IDENTIFY data in ata_getparam(). */
83 static void bswap(int8_t *, int);
84 static void btrim(int8_t *, int);
85 static void bpack(int8_t *, int8_t *, int);
87 static void ata_interrupt_locked(void *data);
89 static void ata_periodic_poll(void *data);
/* Malloc tag for all generic-layer allocations in this file. */
93 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
/* Optional hook filled in by the ata-raid module; NULL when not loaded. */
94 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
/* Non-NULL while the boot-time delayed-attach intrhook is pending. */
96 struct intr_config_hook *ata_delayed_attach = NULL;
98 devclass_t ata_devclass;
99 uma_zone_t ata_request_zone;
100 uma_zone_t ata_composite_zone;
/* When set (default), require an 80-pin cable before enabling > UDMA2. */
105 int ata_dma_check_80pin = 1;
/* Global DMA enable knobs; RDTUN, so settable only as loader tunables. */
109 static int ata_dma = 1;
110 static int atapi_dma = 1;
/* hw.ata sysctl tree and the tunables/sysctls mirroring the knobs above. */
114 static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
116 TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
117 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
118 "ATA disk DMA mode control");
120 TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
121 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
122 CTLFLAG_RW, &ata_dma_check_80pin, 1,
123 "Check for 80pin cable before setting ATA DMA mode");
125 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
126 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
127 "ATAPI device DMA mode control");
/* NOTE(review): ata_wc / ata_setmax are declared elsewhere (elided here). */
128 TUNABLE_INT("hw.ata.wc", &ata_wc);
129 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
130 "ATA disk write caching");
131 TUNABLE_INT("hw.ata.setmax", &ata_setmax);
132 SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
133 "ATA disk set max native address");
/* Advertise that ATA goes through cam(4) in this configuration. */
136 FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver");
140 * newbus device interface related functions
/*
 * Generic channel probe; body elided in this excerpt (presumably just
 * returns success so subclassed drivers can attach — TODO confirm).
 */
143 ata_probe(device_t dev)
/*
 * Attach an ATA channel: initialize softc locks/queues, read per-device
 * hints, reset the hardware, hook up the interrupt and register a CAM
 * SIM/bus/path for the channel.  Many lines (declarations, error unwind
 * labels, returns) are elided in this excerpt.
 */
149 ata_attach(device_t dev)
151 struct ata_channel *ch = device_get_softc(dev);
154 struct cam_devq *devq;
160 /* check that we have a virgin channel to attach */
164 /* initialize the softc basics */
166 ch->state = ATA_IDLE;
167 bzero(&ch->state_mtx, sizeof(struct mtx));
168 mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
169 bzero(&ch->queue_mtx, sizeof(struct mtx));
170 mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
171 TAILQ_INIT(&ch->ata_queue);
172 TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
/* Per-target mode setup from device hints (dev%d.sata_rev / dev%d.mode). */
174 for (i = 0; i < 16; i++) {
175 ch->user[i].revision = 0;
176 snprintf(buf, sizeof(buf), "dev%d.sata_rev", i);
177 if (resource_int_value(device_get_name(dev),
178 device_get_unit(dev), buf, &mode) != 0 &&
179 resource_int_value(device_get_name(dev),
180 device_get_unit(dev), "sata_rev", &mode) != 0)
183 ch->user[i].revision = mode;
184 ch->user[i].mode = 0;
185 snprintf(buf, sizeof(buf), "dev%d.mode", i);
186 if (resource_string_value(device_get_name(dev),
187 device_get_unit(dev), buf, &res) == 0)
188 mode = ata_str2mode(res);
189 else if (resource_string_value(device_get_name(dev),
190 device_get_unit(dev), "mode", &res) == 0)
191 mode = ata_str2mode(res);
195 ch->user[i].mode = mode;
/* SATA links use a smaller default transfer size than PATA (8K vs MAXPHYS). */
196 if (ch->flags & ATA_SATA)
197 ch->user[i].bytecount = 8192;
199 ch->user[i].bytecount = MAXPHYS;
200 ch->user[i].caps = 0;
201 ch->curr[i] = ch->user[i];
/* Advertise power-management request capabilities per ch->pm_level. */
202 if (ch->pm_level > 0)
203 ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
204 if (ch->pm_level > 1)
205 ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
207 callout_init(&ch->poll_callout, 1);
211 /* reset the controller HW, the channel and device(s) */
212 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
215 ATA_LOCKING(dev, ATA_LF_UNLOCK);
218 /* allocate DMA resources if DMA HW present*/
222 /* setup interrupt delivery */
224 ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
225 RF_SHAREABLE | RF_ACTIVE);
227 device_printf(dev, "unable to allocate interrupt\n");
230 if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
231 ata_interrupt, ch, &ch->ih))) {
232 bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
233 device_printf(dev, "unable to setup interrupt\n");
238 /* probe and attach devices on this channel unless we are in early boot */
239 if (!ata_delayed_attach)
243 if (ch->flags & ATA_PERIODIC_POLL)
244 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
245 mtx_lock(&ch->state_mtx);
246 /* Create the device queue for our SIM. */
247 devq = cam_simq_alloc(1);
249 device_printf(dev, "Unable to allocate simq\n");
253 /* Construct SIM entry */
254 ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
255 device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
256 if (ch->sim == NULL) {
257 device_printf(dev, "unable to allocate sim\n");
262 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
263 device_printf(dev, "unable to register xpt bus\n");
267 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
268 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
269 device_printf(dev, "unable to create path\n");
273 mtx_unlock(&ch->state_mtx);
/* Error unwind: tear down SIM/bus/irq in reverse order of acquisition.
 * NOTE(review): the goto labels between these lines are elided. */
277 xpt_bus_deregister(cam_sim_path(ch->sim));
279 cam_sim_free(ch->sim, /*free_devq*/TRUE);
282 bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
283 mtx_unlock(&ch->state_mtx);
284 if (ch->flags & ATA_PERIODIC_POLL)
285 callout_drain(&ch->poll_callout);
/*
 * Detach a channel: stall the request queue, stop polling, delete all
 * children, drain the hotplug task, unregister the CAM SIM/path and
 * release interrupt/DMA resources and mutexes.
 */
291 ata_detach(device_t dev)
293 struct ata_channel *ch = device_get_softc(dev);
299 /* check that we have a valid channel to detach */
303 /* grap the channel lock so no new requests gets launched */
304 mtx_lock(&ch->state_mtx);
305 ch->state |= ATA_STALL_QUEUE;
306 mtx_unlock(&ch->state_mtx);
308 if (ch->flags & ATA_PERIODIC_POLL)
309 callout_drain(&ch->poll_callout);
313 /* detach & delete all children */
314 if (!device_get_children(dev, &children, &nchildren)) {
315 for (i = 0; i < nchildren; i++)
317 device_delete_child(dev, children[i]);
/* device_get_children() allocates the array from M_TEMP; free it here. */
318 free(children, M_TEMP);
321 taskqueue_drain(taskqueue_thread, &ch->conntask);
/* Tear down the CAM side under the state mutex (SIM lock). */
324 mtx_lock(&ch->state_mtx);
325 xpt_async(AC_LOST_DEVICE, ch->path, NULL);
326 xpt_free_path(ch->path);
327 xpt_bus_deregister(cam_sim_path(ch->sim));
328 cam_sim_free(ch->sim, /*free_devq*/TRUE);
330 mtx_unlock(&ch->state_mtx);
333 /* release resources */
334 bus_teardown_intr(dev, ch->r_irq, ch->ih);
335 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
338 /* free DMA resources if DMA HW present*/
342 mtx_destroy(&ch->state_mtx);
343 mtx_destroy(&ch->queue_mtx);
/*
 * Taskqueue handler for connect/disconnect events (queued via ch->conntask).
 * Bails out if the SIM is already gone; otherwise allocates a CCB and a
 * wildcard path to rescan the bus (the rescan call itself is elided here).
 */
348 ata_conn_event(void *context, int dummy)
350 device_t dev = (device_t)context;
352 struct ata_channel *ch = device_get_softc(dev);
355 mtx_lock(&ch->state_mtx);
356 if (ch->sim == NULL) {
357 mtx_unlock(&ch->state_mtx);
/* Best effort: silently give up if no CCB can be allocated right now. */
361 if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
363 if (xpt_create_path(&ccb->ccb_h.path, NULL,
364 cam_sim_path(ch->sim),
365 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
370 mtx_unlock(&ch->state_mtx);
/*
 * Reinitialize a channel after an error or reset request: lock the
 * channel, stall the queue, reset the hardware, re-probe children
 * (deleting any that went missing), then requeue or fail the request
 * that was running when the reset hit.  The tail of this function
 * (freeze/release of the SIM queue) is the CAM-side variant.
 */
377 ata_reinit(device_t dev)
379 struct ata_channel *ch = device_get_softc(dev);
380 struct ata_request *request;
385 /* check that we have a valid channel to reinit */
386 if (!ch || !ch->r_irq)
390 device_printf(dev, "reiniting channel ..\n");
392 /* poll for locking the channel */
393 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
396 /* catch eventual request in ch->running */
397 mtx_lock(&ch->state_mtx);
398 if (ch->state & ATA_STALL_QUEUE) {
399 /* Recursive reinits and reinits during detach prohobited. */
400 mtx_unlock(&ch->state_mtx);
/* Stop the timeout callout of the in-flight request, if any. */
403 if ((request = ch->running))
404 callout_stop(&request->callout);
407 /* unconditionally grap the channel lock */
408 ch->state |= ATA_STALL_QUEUE;
409 mtx_unlock(&ch->state_mtx);
411 /* reset the controller HW, the channel and device(s) */
414 /* reinit the children and delete any that fails */
415 if (!device_get_children(dev, &children, &nchildren)) {
416 mtx_lock(&Giant); /* newbus suckage it needs Giant */
417 for (i = 0; i < nchildren; i++) {
418 /* did any children go missing ? */
419 if (children[i] && device_is_attached(children[i]) &&
420 ATA_REINIT(children[i])) {
422 * if we had a running request and its device matches
423 * this child we need to inform the request that the
426 if (request && request->dev == children[i]) {
427 request->result = ENXIO;
428 device_printf(request->dev, "FAILURE - device detached\n");
430 /* if not timeout finish request here */
431 if (!(request->flags & ATA_R_TIMEOUT))
435 device_delete_child(dev, children[i]);
438 free(children, M_TEMP);
439 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
442 /* if we still have a good request put it on the queue again */
443 if (request && !(request->flags & ATA_R_TIMEOUT)) {
444 device_printf(request->dev,
445 "WARNING - %s requeued due to channel reset",
446 ata_cmd2str(request));
447 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
448 printf(" LBA=%ju", request->u.ata.lba);
450 request->flags |= ATA_R_REQUEUE;
451 ata_queue_request(request);
454 /* we're done release the channel for new work */
455 mtx_lock(&ch->state_mtx);
456 ch->state = ATA_IDLE;
457 mtx_unlock(&ch->state_mtx);
458 ATA_LOCKING(dev, ATA_LF_UNLOCK);
460 /* Add new children. */
461 /* ata_identify(dev); */
464 device_printf(dev, "reinit done ..\n");
466 /* kick off requests on the queue */
/* CAM variant: freeze the SIM queue, abort the running request with
 * ERESTART, notify the XPT of the bus reset, then release the queue. */
469 xpt_freeze_simq(ch->sim, 1);
470 if ((request = ch->running)) {
472 if (ch->state == ATA_ACTIVE)
473 ch->state = ATA_IDLE;
474 callout_stop(&request->callout);
476 ch->dma.unload(request);
477 request->result = ERESTART;
478 ata_cam_end_transaction(dev, request);
480 /* reset the controller HW, the channel and device(s) */
482 /* Tell the XPT about the event */
483 xpt_async(AC_BUS_RESET, ch->path, NULL);
484 xpt_release_simq(ch->sim, TRUE);
/*
 * Suspend handler: stop periodic polling, freeze the SIM queue and wait
 * (polling with msleep/tsleep) until the channel goes idle before
 * allowing the system to suspend.
 */
490 ata_suspend(device_t dev)
492 struct ata_channel *ch;
494 /* check for valid device */
495 if (!dev || !(ch = device_get_softc(dev)))
499 if (ch->flags & ATA_PERIODIC_POLL)
500 callout_drain(&ch->poll_callout);
501 mtx_lock(&ch->state_mtx);
502 xpt_freeze_simq(ch->sim, 1);
/* Sleep in 10ms slices until the running request completes. */
503 while (ch->state != ATA_IDLE)
504 msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
505 mtx_unlock(&ch->state_mtx);
507 /* wait for the channel to be IDLE or detached before suspending */
509 mtx_lock(&ch->state_mtx);
510 if (ch->state == ATA_IDLE) {
/* Claim the channel (mark ACTIVE) so nothing starts during suspend. */
511 ch->state = ATA_ACTIVE;
512 mtx_unlock(&ch->state_mtx);
515 mtx_unlock(&ch->state_mtx);
516 tsleep(ch, PRIBIO, "atasusp", hz/10);
518 ATA_LOCKING(dev, ATA_LF_UNLOCK);
/*
 * Resume handler: reinit the channel, release the SIM queue frozen by
 * ata_suspend(), and restart periodic polling if enabled.
 * NOTE(review): ata_reinit() appears twice here (lines 535 and 542);
 * the second call looks like leftover from the non-CAM path — confirm
 * against the unelided source.
 */
524 ata_resume(device_t dev)
526 struct ata_channel *ch;
529 /* check for valid device */
530 if (!dev || !(ch = device_get_softc(dev)))
534 mtx_lock(&ch->state_mtx);
535 error = ata_reinit(dev);
536 xpt_release_simq(ch->sim, TRUE);
537 mtx_unlock(&ch->state_mtx);
538 if (ch->flags & ATA_PERIODIC_POLL)
539 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
541 /* reinit the devices, we dont know what mode/state they are in */
542 error = ata_reinit(dev);
543 /* kick off requests on the queue */
/*
 * Top-level interrupt handler: takes the channel state mutex, brackets
 * the work with xpt_batch_start/done so CAM can coalesce completions,
 * and delegates to ata_interrupt_locked().
 */
550 ata_interrupt(void *data)
553 struct ata_channel *ch = (struct ata_channel *)data;
555 mtx_lock(&ch->state_mtx);
556 xpt_batch_start(ch->sim);
558 ata_interrupt_locked(data);
560 xpt_batch_done(ch->sim);
561 mtx_unlock(&ch->state_mtx);
/*
 * Interrupt body.  Filters spurious interrupts (hw.status callback),
 * ignores interrupts with no running request or an idle channel, and
 * completes the transaction when the hardware reports it finished.
 * NOTE(review): this elided variant both takes and releases state_mtx
 * itself while ata_interrupt() above also locks it — the unelided
 * source distinguishes locked/unlocked entry; confirm before editing.
 */
566 ata_interrupt_locked(void *data)
568 struct ata_channel *ch = (struct ata_channel *)data;
569 struct ata_request *request;
572 mtx_lock(&ch->state_mtx);
575 /* ignore interrupt if its not for us */
576 if (ch->hw.status && !ch->hw.status(ch->dev))
579 /* do we have a running request */
580 if (!(request = ch->running))
583 ATA_DEBUG_RQ(request, "interrupt");
585 /* safetycheck for the right state */
586 if (ch->state == ATA_IDLE) {
587 device_printf(request->dev, "interrupt on idle channel ignored\n");
592 * we have the HW locks, so end the transaction for this request
593 * if it finishes immediately otherwise wait for next interrupt
595 if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
597 if (ch->state == ATA_ACTIVE)
598 ch->state = ATA_IDLE;
600 ata_cam_end_transaction(ch->dev, request);
602 mtx_unlock(&ch->state_mtx);
603 ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
610 mtx_unlock(&ch->state_mtx);
/*
 * Self-rearming 1 Hz callout used on controllers flagged
 * ATA_PERIODIC_POLL; the actual poll call between re-arm and return
 * is elided in this excerpt.
 */
616 ata_periodic_poll(void *data)
618 struct ata_channel *ch = (struct ata_channel *)data;
620 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
/* Warn that DMA is capped at UDMA33 because `who` reported a 40-pin cable. */
626 ata_print_cable(device_t dev, u_int8_t *who)
629 "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
/*
 * Clamp `mode` to UDMA33 unless the device's IDENTIFY data reports an
 * 80-conductor cable.  Skipped entirely when the ata_dma_check_80pin
 * tunable is cleared.  (The clamp assignment/return lines are elided.)
 */
634 ata_check_80pin(device_t dev, int mode)
636 struct ata_device *atadev = device_get_softc(dev);
638 if (!ata_dma_check_80pin) {
640 device_printf(dev, "Skipping 80pin cable check\n");
644 if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
645 ata_print_cable(dev, "device");
/*
 * Negotiate a transfer mode with the controller: limit the requested
 * mode, let the controller adjust it (ATA_SETMODE), apply the 80-pin
 * cable check on PATA, and loop until the mode stops changing.  Then
 * issue SET FEATURES/SET TRANSFER MODE to the device.
 */
654 ata_setmode(device_t dev)
656 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
657 struct ata_device *atadev = device_get_softc(dev);
658 int error, mode, pmode;
662 pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
663 mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
/* Cable check only applies to PATA controllers that don't do it themselves. */
664 if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
665 mode = ata_check_80pin(dev, mode);
666 } while (pmode != mode); /* Iterate till successful negotiation. */
667 error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
669 device_printf(dev, "%ssetting %s\n",
670 (error) ? "FAILURE " : "", ata_mode2str(mode));
676 * device related interfaces
/*
 * ioctl entry point of the ata(4) control device.  Channel-level
 * commands (max channel, reinit, attach, detach, device enumeration)
 * are handled here; unknown commands fall through to the ata-raid
 * hook when loaded.  The switch/case/break scaffolding is partly
 * elided in this excerpt.
 */
680 ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
681 int32_t flag, struct thread *td)
683 device_t device, *children;
684 struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
685 int *value = (int *)data;
686 int i, nchildren, error = ENOTTY;
689 case IOCATAGMAXCHANNEL:
690 /* In case we have channel 0..n this will return n+1. */
691 *value = devclass_get_maxunit(ata_devclass);
/* IOCATAREINIT: validate the channel unit, then reinit it. */
696 if (*value >= devclass_get_maxunit(ata_devclass) ||
697 !(device = devclass_get_device(ata_devclass, *value)) ||
698 !device_is_attached(device))
700 error = ata_reinit(device);
/* IOCATAATTACH: validate and attach the channel. */
704 if (*value >= devclass_get_maxunit(ata_devclass) ||
705 !(device = devclass_get_device(ata_devclass, *value)) ||
706 !device_is_attached(device))
708 error = DEVICE_ATTACH(device);
/* IOCATADETACH: validate and detach the channel. */
712 if (*value >= devclass_get_maxunit(ata_devclass) ||
713 !(device = devclass_get_device(ata_devclass, *value)) ||
714 !device_is_attached(device))
716 error = DEVICE_DETACH(device);
/* IOCATADEVICES: report name + IDENTIFY data of master/slave. */
720 if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
721 !(device = devclass_get_device(ata_devclass, devices->channel)) ||
722 !device_is_attached(device))
724 bzero(devices->name[0], 32);
725 bzero(&devices->params[0], sizeof(struct ata_params));
726 bzero(devices->name[1], 32);
727 bzero(&devices->params[1], sizeof(struct ata_params));
728 if (!device_get_children(device, &children, &nchildren)) {
729 for (i = 0; i < nchildren; i++) {
730 if (children[i] && device_is_attached(children[i])) {
731 struct ata_device *atadev = device_get_softc(children[i]);
733 if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
734 strncpy(devices->name[0],
735 device_get_nameunit(children[i]), 32);
736 bcopy(&atadev->param, &devices->params[0],
737 sizeof(struct ata_params));
739 if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
740 strncpy(devices->name[1],
741 device_get_nameunit(children[i]), 32);
742 bcopy(&atadev->param, &devices->params[1],
743 sizeof(struct ata_params));
747 free(children, M_TEMP);
/* default: delegate to ata-raid if its ioctl hook is registered. */
755 if (ata_raid_ioctl_func)
756 error = ata_raid_ioctl_func(cmd, data);
/*
 * Per-device ioctl backend: IOCATAREQUEST passes a raw ATA/ATAPI
 * command through (copyin data for writes, queue the request, copyout
 * for reads, reflect result registers and sense data back); the other
 * cases get/set IDENTIFY data, transfer mode and spindown timeout.
 * Case labels, error paths and the final return are partly elided.
 */
764 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
766 struct ata_device *atadev = device_get_softc(dev);
767 struct ata_channel *ch = device_get_softc(device_get_parent(dev));
768 struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
769 struct ata_params *params = (struct ata_params *)data;
770 int *mode = (int *)data;
771 struct ata_request *request;
/* Reject transfers larger than the channel's DMA segment limit. */
777 if (ioc_request->count >
778 (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
781 if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
784 if (!(request = ata_alloc_request())) {
788 request->dev = atadev->dev;
789 if (ioc_request->flags & ATA_CMD_WRITE) {
790 error = copyin(ioc_request->data, buf, ioc_request->count);
793 ata_free_request(request);
797 if (ioc_request->flags & ATA_CMD_ATAPI) {
798 request->flags = ATA_R_ATAPI;
799 bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
802 request->u.ata.command = ioc_request->u.ata.command;
803 request->u.ata.feature = ioc_request->u.ata.feature;
804 request->u.ata.lba = ioc_request->u.ata.lba;
805 request->u.ata.count = ioc_request->u.ata.count;
807 request->timeout = ioc_request->timeout;
809 request->bytecount = ioc_request->count;
810 request->transfersize = request->bytecount;
811 if (ioc_request->flags & ATA_CMD_CONTROL)
812 request->flags |= ATA_R_CONTROL;
813 if (ioc_request->flags & ATA_CMD_READ)
814 request->flags |= ATA_R_READ;
815 if (ioc_request->flags & ATA_CMD_WRITE)
816 request->flags |= ATA_R_WRITE;
/* Queue and wait; ata_queue_request() completes synchronously here. */
817 ata_queue_request(request);
818 if (request->flags & ATA_R_ATAPI) {
819 bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
820 sizeof(struct atapi_sense));
/* Reflect the result taskfile back to userland. */
823 ioc_request->u.ata.command = request->u.ata.command;
824 ioc_request->u.ata.feature = request->u.ata.feature;
825 ioc_request->u.ata.lba = request->u.ata.lba;
826 ioc_request->u.ata.count = request->u.ata.count;
828 ioc_request->error = request->result;
829 if (ioc_request->flags & ATA_CMD_READ)
830 error = copyout(buf, ioc_request->data, ioc_request->count);
834 ata_free_request(request);
/* IOCATAGPARM: refresh and copy out the IDENTIFY block. */
838 ata_getparam(atadev, 0);
839 bcopy(&atadev->param, params, sizeof(struct ata_params));
/* IOCATASMODE / IOCATAGMODE: set or report the transfer mode. */
843 atadev->mode = *mode;
848 *mode = atadev->mode |
849 (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
851 case IOCATASSPINDOWN:
852 atadev->spindown = *mode;
854 case IOCATAGSPINDOWN:
855 *mode = atadev->spindown;
/*
 * Deferred boot-time attach: identify devices on every channel in the
 * devclass, then release (and free) the intrhook that deferred us.
 * Runs under Giant because newbus requires it.
 */
865 ata_boot_attach(void)
867 struct ata_channel *ch;
870 mtx_lock(&Giant); /* newbus suckage it needs Giant */
872 /* kick off probe and attach on all channels */
873 for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
874 if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
875 ata_identify(ch->dev);
879 /* release the hook that got us here, we are only needed once during boot */
880 if (ata_delayed_attach) {
881 config_intrhook_disestablish(ata_delayed_attach);
882 free(ata_delayed_attach, M_TEMP);
883 ata_delayed_attach = NULL;
886 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
891 * misc support functions
/*
 * Create an "ad" child device for `atadev` under `parent`, wire its
 * softc, and seed conservative defaults (single-sector I/O, PIO mode).
 * unit < 0 requests a wildcard unit number.
 */
895 ata_add_child(device_t parent, struct ata_device *atadev, int unit)
899 if ((child = device_add_child(parent, (unit < 0) ? NULL : "ad", unit))) {
900 device_set_softc(child, atadev);
903 atadev->max_iosize = DEV_BSIZE;
904 atadev->mode = ATA_PIO_MAX;
/*
 * Issue IDENTIFY (ATA or ATAPI, chosen from ch->devices) to fill
 * atadev->param, retrying once.  On success the IDENTIFY strings are
 * byte-swapped to host order, trimmed and packed, a description is set
 * on the device, and (when init != 0, presumably) an initial transfer
 * mode is chosen, honoring dev%d.mode / mode hints.
 */
912 ata_getparam(struct ata_device *atadev, int init)
914 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
915 struct ata_request *request;
918 u_int8_t command = 0;
919 int error = ENOMEM, retries = 2, mode = -1;
921 if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
922 command = ATA_ATA_IDENTIFY;
923 if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
924 command = ATA_ATAPI_IDENTIFY;
/* Retry loop: up to 2 synchronous IDENTIFY attempts. */
928 while (retries-- > 0 && error) {
929 if (!(request = ata_alloc_request()))
931 request->dev = atadev->dev;
932 request->timeout = 1;
933 request->retries = 0;
934 request->u.ata.command = command;
935 request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
937 request->flags |= ATA_R_QUIET;
938 request->data = (void *)&atadev->param;
939 request->bytecount = sizeof(struct ata_params);
940 request->donecount = 0;
941 request->transfersize = DEV_BSIZE;
942 ata_queue_request(request);
943 error = request->result;
944 ata_free_request(request);
/* A printable model name is taken as evidence of valid IDENTIFY data. */
947 if (!error && (isprint(atadev->param.model[0]) ||
948 isprint(atadev->param.model[1]))) {
949 struct ata_params *atacap = &atadev->param;
/* IDENTIFY data arrives as little-endian 16-bit words; fix in place. */
952 for (ptr = (int16_t *)atacap;
953 ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
954 *ptr = le16toh(*ptr);
/* Some vendors pre-swap their ID strings; skip the swap for those. */
956 if (!(!strncmp(atacap->model, "FX", 2) ||
957 !strncmp(atacap->model, "NEC", 3) ||
958 !strncmp(atacap->model, "Pioneer", 7) ||
959 !strncmp(atacap->model, "SHARP", 5))) {
960 bswap(atacap->model, sizeof(atacap->model));
961 bswap(atacap->revision, sizeof(atacap->revision));
962 bswap(atacap->serial, sizeof(atacap->serial));
964 btrim(atacap->model, sizeof(atacap->model));
965 bpack(atacap->model, atacap->model, sizeof(atacap->model));
966 btrim(atacap->revision, sizeof(atacap->revision));
967 bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
968 btrim(atacap->serial, sizeof(atacap->serial));
969 bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
972 printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
973 device_get_unit(ch->dev),
974 ata_unit2str(atadev),
975 ata_mode2str(ata_pmode(atacap)),
976 ata_mode2str(ata_wmode(atacap)),
977 ata_mode2str(ata_umode(atacap)),
978 (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
983 sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
984 device_set_desc_copy(atadev->dev, buffer);
/* ATAPI (non-CFA) devices: allow DMA only for suitable DRQ types. */
985 if ((atadev->param.config & ATA_PROTO_ATAPI) &&
986 (atadev->param.config != ATA_CFA_MAGIC1) &&
987 (atadev->param.config != ATA_CFA_MAGIC2)) {
989 (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
990 ata_umode(&atadev->param) >= ATA_UDMA2)
991 atadev->mode = ATA_DMA_MAX;
995 (ata_umode(&atadev->param) > 0 ||
996 ata_wmode(&atadev->param) > 0))
997 atadev->mode = ATA_DMA_MAX;
/* Device hints override the mode chosen above. */
999 snprintf(buf, sizeof(buf), "dev%d.mode", atadev->unit);
1000 if (resource_string_value(device_get_name(ch->dev),
1001 device_get_unit(ch->dev), buf, &res) == 0)
1002 mode = ata_str2mode(res);
1003 else if (resource_string_value(device_get_name(ch->dev),
1004 device_get_unit(ch->dev), "mode", &res) == 0)
1005 mode = ata_str2mode(res);
1007 atadev->mode = mode;
/*
 * Enumerate devices signalled present in ch->devices: skip targets that
 * already have children, then for each new target allocate an
 * ata_device, create a child (static unit numbering under
 * ATA_STATIC_ID) and run IDENTIFY; children whose IDENTIFY fails are
 * deleted again.  On PATA the slave is identified before the master so
 * cable detection on the master works.  Finishes with a generic
 * probe/attach pass.
 */
1020 ata_identify(device_t dev)
1022 struct ata_channel *ch = device_get_softc(dev);
1023 struct ata_device *atadev;
1025 device_t child, master = NULL;
1026 int nchildren, i, n = ch->devices;
1029 device_printf(dev, "Identifying devices: %08x\n", ch->devices);
1032 /* Skip existing devices. */
1033 if (!device_get_children(dev, &children, &nchildren)) {
1034 for (i = 0; i < nchildren; i++) {
1035 if (children[i] && (atadev = device_get_softc(children[i])))
1036 n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
1038 free(children, M_TEMP);
1040 /* Create new devices. */
1042 device_printf(dev, "New devices: %08x\n", n);
1047 for (i = 0; i < ATA_PM; ++i) {
1048 if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
1051 if (!(atadev = malloc(sizeof(struct ata_device),
1052 M_ATA, M_NOWAIT | M_ZERO))) {
1053 device_printf(dev, "out of memory\n");
1057 #ifdef ATA_STATIC_ID
1058 if (n & (ATA_ATA_MASTER << i))
1059 unit = (device_get_unit(dev) << 1) + i;
1061 if ((child = ata_add_child(dev, atadev, unit))) {
1063 * PATA slave should be identified first, to allow
1064 * device cable detection on master to work properly.
1066 if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
1067 (n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
/* Remember the master; it is identified after the slave below. */
1071 if (ata_getparam(atadev, 1)) {
1072 device_delete_child(dev, child);
1073 free(atadev, M_ATA);
1077 free(atadev, M_ATA);
/* Deferred master identification (see comment above). */
1081 atadev = device_get_softc(master);
1082 if (ata_getparam(atadev, 1)) {
1083 device_delete_child(dev, master);
1084 free(atadev, M_ATA);
1087 bus_generic_probe(dev);
1088 bus_generic_attach(dev);
/*
 * Alias the read-side register resources to their write-side twins:
 * on legacy ATA the same port decodes to a different register on read
 * (FEATURE/ERROR, COUNT/IREASON, COMMAND/STATUS, CONTROL/ALTSTAT).
 */
1095 ata_default_registers(device_t dev)
1097 struct ata_channel *ch = device_get_softc(dev);
1099 /* fill in the defaults from whats setup already */
1100 ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
1101 ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
1102 ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
1103 ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
1104 ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
1105 ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
1106 ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
1107 ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
/*
 * Promote a request's taskfile command to its 48-bit LBA variant when
 * needed: either the request addresses beyond the 28-bit LBA limit (or
 * transfers > 256 sectors) on a 48-bit capable device, or the command
 * is one whose 48-bit form must always be used on such devices
 * (FLUSH CACHE, READ/SET MAX ADDRESS).  On controllers flagged
 * ATA_NO_48BIT_DMA the DMA variants are demoted to PIO (MUL48/plain 48)
 * and ATA_R_DMA is stripped.  Sets ATA_R_48BIT when a promotion occurs.
 */
1111 ata_modify_if_48bit(struct ata_request *request)
1113 struct ata_channel *ch = device_get_softc(request->parent);
1114 struct ata_device *atadev = device_get_softc(request->dev);
1116 request->flags &= ~ATA_R_48BIT;
1118 if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
1119 request->u.ata.count > 256) &&
1120 atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1122 /* translate command into 48bit version */
1123 switch (request->u.ata.command) {
1125 request->u.ata.command = ATA_READ48;
1128 request->u.ata.command = ATA_READ_MUL48;
1131 if (ch->flags & ATA_NO_48BIT_DMA) {
1132 if (request->transfersize > DEV_BSIZE)
1133 request->u.ata.command = ATA_READ_MUL48;
1135 request->u.ata.command = ATA_READ48;
1136 request->flags &= ~ATA_R_DMA;
1139 request->u.ata.command = ATA_READ_DMA48;
1141 case ATA_READ_DMA_QUEUED:
1142 if (ch->flags & ATA_NO_48BIT_DMA) {
1143 if (request->transfersize > DEV_BSIZE)
1144 request->u.ata.command = ATA_READ_MUL48;
1146 request->u.ata.command = ATA_READ48;
1147 request->flags &= ~ATA_R_DMA;
1150 request->u.ata.command = ATA_READ_DMA_QUEUED48;
1153 request->u.ata.command = ATA_WRITE48;
1156 request->u.ata.command = ATA_WRITE_MUL48;
1159 if (ch->flags & ATA_NO_48BIT_DMA) {
1160 if (request->transfersize > DEV_BSIZE)
1161 request->u.ata.command = ATA_WRITE_MUL48;
1163 request->u.ata.command = ATA_WRITE48;
1164 request->flags &= ~ATA_R_DMA;
1167 request->u.ata.command = ATA_WRITE_DMA48;
1169 case ATA_WRITE_DMA_QUEUED:
1170 if (ch->flags & ATA_NO_48BIT_DMA) {
1171 if (request->transfersize > DEV_BSIZE)
1172 request->u.ata.command = ATA_WRITE_MUL48;
/* Single duplicated assignment removed here; this mirrors the
 * ATA_READ_DMA_QUEUED case above exactly. */
1174 request->u.ata.command = ATA_WRITE48;
1176 request->flags &= ~ATA_R_DMA;
1179 request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
1181 case ATA_FLUSHCACHE:
1182 request->u.ata.command = ATA_FLUSHCACHE48;
1184 case ATA_SET_MAX_ADDRESS:
1185 request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1190 request->flags |= ATA_R_48BIT;
/* Commands that must use the 48-bit opcode on any 48-bit device. */
1192 else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
1194 /* translate command into 48bit version */
1195 switch (request->u.ata.command) {
1196 case ATA_FLUSHCACHE:
1197 request->u.ata.command = ATA_FLUSHCACHE48;
1199 case ATA_READ_NATIVE_MAX_ADDRESS:
1200 request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
1202 case ATA_SET_MAX_ADDRESS:
1203 request->u.ata.command = ATA_SET_MAX_ADDRESS48;
1208 request->flags |= ATA_R_48BIT;
/*
 * Delay for `interval` microseconds.  The `1 ||` forces the busy-wait
 * DELAY() path unconditionally; the pause() branch below is currently
 * dead code kept for when the timer/sleep subsystems become usable here.
 */
1213 ata_udelay(int interval)
1215 /* for now just use DELAY, the timer/sleep subsytems are not there yet */
1216 if (1 || interval < (1000000/hz) || ata_delayed_attach)
1219 pause("ataslp", interval/(1000000/hz));
/*
 * Render a device's unit as "portN" behind a port multiplier, otherwise
 * "master"/"slave".  NOTE(review): returns a buffer declared in an
 * elided line — presumably static, so not reentrant; confirm.
 */
1224 ata_unit2str(struct ata_device *atadev)
1226 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
1229 if (ch->devices & ATA_PORTMULTIPLIER)
1230 sprintf(str, "port%d", atadev->unit);
1232 sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
/*
 * Map a numeric transfer mode to its display name ("PIO0".."SATA300");
 * -1 reports "UNSUPPORTED", and unknown values fall through to a
 * DMA/BIOS-default classification (elided below the switch).
 */
1238 ata_mode2str(int mode)
1241 case -1: return "UNSUPPORTED";
1242 case ATA_PIO0: return "PIO0";
1243 case ATA_PIO1: return "PIO1";
1244 case ATA_PIO2: return "PIO2";
1245 case ATA_PIO3: return "PIO3";
1246 case ATA_PIO4: return "PIO4";
1247 case ATA_WDMA0: return "WDMA0";
1248 case ATA_WDMA1: return "WDMA1";
1249 case ATA_WDMA2: return "WDMA2";
1250 case ATA_UDMA0: return "UDMA16";
1251 case ATA_UDMA1: return "UDMA25";
1252 case ATA_UDMA2: return "UDMA33";
1253 case ATA_UDMA3: return "UDMA40";
1254 case ATA_UDMA4: return "UDMA66";
1255 case ATA_UDMA5: return "UDMA100";
1256 case ATA_UDMA6: return "UDMA133";
1257 case ATA_SA150: return "SATA150";
1258 case ATA_SA300: return "SATA300";
1260 if (mode & ATA_DMA_MASK)
/*
 * Inverse of ata_mode2str(): parse a mode name (case-insensitive) into
 * its numeric constant.  Both the short form ("UDMA2") and the speed
 * form ("UDMA33") are accepted; the failure return is elided here.
 */
1268 ata_str2mode(const char *str)
1271 if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
1272 if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
1273 if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
1274 if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
1275 if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
1276 if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
1277 if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
1278 if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
1279 if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
1280 if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
1281 if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
1282 if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
1283 if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
1284 if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
1285 if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
1286 if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
1287 if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
1288 if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
1289 if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
1290 if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
1291 if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
1292 if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
/* Map a SATA revision number to a link-speed label; 0xff is generic "SATA". */
1298 ata_satarev2str(int rev)
1302 case 1: return "SATA 1.5Gb/s";
1303 case 2: return "SATA 3Gb/s";
1304 case 3: return "SATA 6Gb/s";
1305 case 0xff: return "SATA";
1306 default: return "???";
/* Nonzero iff the device at `target` on this channel is ATAPI. */
1312 ata_atapi(device_t dev, int target)
1314 struct ata_channel *ch = device_get_softc(dev);
1316 return (ch->devices & (ATA_ATAPI_MASTER << target));
/*
 * Derive the best supported PIO mode from IDENTIFY data: use the
 * advanced-PIO words when valid (ATA_FLAG_64_70), otherwise fall back
 * to MWDMA support bits and finally the retired PIO-mode field.
 * The return statements between these lines are elided.
 */
1321 ata_pmode(struct ata_params *ap)
1323 if (ap->atavalid & ATA_FLAG_64_70) {
1324 if (ap->apiomodes & 0x02)
1326 if (ap->apiomodes & 0x01)
1329 if (ap->mwdmamodes & 0x04)
1331 if (ap->mwdmamodes & 0x02)
1333 if (ap->mwdmamodes & 0x01)
1335 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
1337 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
1339 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
/* Best supported WDMA (multiword DMA) mode from the IDENTIFY bit mask. */
1347 ata_wmode(struct ata_params *ap)
1349 if (ap->mwdmamodes & 0x04)
1351 if (ap->mwdmamodes & 0x02)
1353 if (ap->mwdmamodes & 0x01)
/*
 * Best supported UDMA mode from IDENTIFY word 88, valid only when
 * ATA_FLAG_88 is set; highest bit wins (0x40 = UDMA6 ... 0x01 = UDMA0).
 */
1361 ata_umode(struct ata_params *ap)
1363 if (ap->atavalid & ATA_FLAG_88) {
1364 if (ap->udmamodes & 0x40)
1366 if (ap->udmamodes & 0x20)
1368 if (ap->udmamodes & 0x10)
1370 if (ap->udmamodes & 0x08)
1372 if (ap->udmamodes & 0x04)
1374 if (ap->udmamodes & 0x02)
1376 if (ap->udmamodes & 0x01)
/*
 * Clamp a requested transfer mode against both an externally imposed
 * ceiling (maxmode, 0 == no limit) and the capabilities the device
 * itself reported via IDENTIFY: UDMA first, then WDMA, then PIO.
 */
1385 ata_limit_mode(device_t dev, int mode, int maxmode)
1387 struct ata_device *atadev = device_get_softc(dev);
/* Honor the caller/controller imposed ceiling first. */
1389 if (maxmode && mode > maxmode)
/* If the device supports UDMA at all, cap at its best UDMA mode. */
1392 if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
1393 return min(mode, ata_umode(&atadev->param));
/* Likewise for multiword DMA. */
1395 if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
1396 return min(mode, ata_wmode(&atadev->param));
/* Otherwise fall back to the device's best PIO mode. */
1398 if (mode > ata_pmode(&atadev->param))
1399 return min(mode, ata_pmode(&atadev->param));
/*
 * Byte-swap every 16-bit word of buf (len bytes), walking backwards
 * from one past the end.  Presumably used to fix the endianness of
 * IDENTIFY string fields; the swap statement itself is elided in this
 * excerpt.
 */
1407 bswap(int8_t *buf, int len)
1409 u_int16_t *ptr = (u_int16_t*)(buf + len);
1411 while (--ptr >= (u_int16_t*)buf)
/*
 * Sanitize a fixed-width ATA string in place: NUL bytes and
 * underscores are presumably replaced with spaces (the assignment
 * line is elided in this excerpt), then trailing spaces are stripped
 * by the second loop.
 */
1418 btrim(int8_t *buf, int len)
1422 for (ptr = buf; ptr < buf+len; ++ptr)
1423 if (!*ptr || *ptr == '_')
1425 for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
/*
 * Copy src into dst (len bytes) while collapsing each run of
 * consecutive spaces; 'blank' tracks whether we are currently inside
 * such a run.  NOTE(review): the copy/terminate statements are elided
 * in this excerpt.
 */
1432 bpack(int8_t *src, int8_t *dst, int len)
1436 for (i = j = blank = 0 ; i < len; i++) {
/* Inside a blank run: skip further spaces. */
1437 if (blank && src[i] == ' ') continue;
1438 if (blank && src[i] != ' ') {
1443 if (src[i] == ' ') {
/*
 * Translate a CAM CCB (XPT_ATA_IO or ATAPI/SCSI I/O) into an
 * ata_request, hook it up as the channel's running request, and kick
 * the controller's begin_transaction hook.  If the hardware reports
 * ATA_OP_FINISHED immediately, complete the request inline via
 * ata_cam_end_transaction().
 */
1457 ata_cam_begin_transaction(device_t dev, union ccb *ccb)
1459 struct ata_channel *ch = device_get_softc(dev);
1460 struct ata_request *request;
/* Allocation failure is reported back through the CCB status. */
1462 if (!(request = ata_alloc_request())) {
1463 device_printf(dev, "FAILURE - out of memory in start\n");
1464 ccb->ccb_h.status = CAM_REQ_INVALID;
1468 bzero(request, sizeof(*request));
/* Connect this request to the CCB and channel. */
1471 request->dev = NULL;
1472 request->parent = dev;
1473 request->unit = ccb->ccb_h.target_id;
1474 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
/* Native ATA command: copy registers from the ataio CCB. */
1475 request->data = ccb->ataio.data_ptr;
1476 request->bytecount = ccb->ataio.dxfer_len;
1477 request->u.ata.command = ccb->ataio.cmd.command;
1478 request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
1479 (uint16_t)ccb->ataio.cmd.features;
1480 request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
1481 (uint16_t)ccb->ataio.cmd.sector_count;
1482 if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
/* 48-bit LBA: high half comes from the *_exp register set. */
1483 request->flags |= ATA_R_48BIT;
1484 request->u.ata.lba =
1485 ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
1486 ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
1487 ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
/* 28-bit LBA: bits 24-27 live in the low nibble of 'device'. */
1489 request->u.ata.lba =
1490 ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
1492 request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
1493 ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
1494 (uint64_t)ccb->ataio.cmd.lba_low;
1495 if (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)
1496 request->flags |= ATA_R_NEEDRESULT;
1497 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1498 ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
1499 request->flags |= ATA_R_DMA;
1500 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1501 request->flags |= ATA_R_READ;
1502 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1503 request->flags |= ATA_R_WRITE;
/*
 * READ/WRITE MULTIPLE may move the negotiated per-interrupt byte
 * count; everything else transfers one 512-byte sector at a time.
 */
1504 if (ccb->ataio.cmd.command == ATA_READ_MUL ||
1505 ccb->ataio.cmd.command == ATA_READ_MUL48 ||
1506 ccb->ataio.cmd.command == ATA_WRITE_MUL ||
1507 ccb->ataio.cmd.command == ATA_WRITE_MUL48) {
1508 request->transfersize = min(request->bytecount,
1509 ch->curr[ccb->ccb_h.target_id].bytecount);
1511 request->transfersize = min(request->bytecount, 512);
/* ATAPI (packet) command: copy the CDB from the csio CCB. */
1513 request->data = ccb->csio.data_ptr;
1514 request->bytecount = ccb->csio.dxfer_len;
1515 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1516 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
1517 request->u.atapi.ccb, ccb->csio.cdb_len);
1518 request->flags |= ATA_R_ATAPI;
1519 if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1520 request->flags |= ATA_R_ATAPI16;
1521 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1522 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1523 request->flags |= ATA_R_DMA;
1524 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1525 request->flags |= ATA_R_READ;
1526 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1527 request->flags |= ATA_R_WRITE;
1528 request->transfersize = min(request->bytecount,
1529 ch->curr[ccb->ccb_h.target_id].bytecount);
/* CAM owns retries; round the CCB's ms timeout up to whole seconds. */
1531 request->retries = 0;
1532 request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1533 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
1535 request->flags |= ATA_R_DATA_IN_CCB;
/* Mark the channel busy and hand the request to the controller. */
1537 ch->running = request;
1538 ch->state = ATA_ACTIVE;
1539 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1541 ch->state = ATA_IDLE;
1542 ata_cam_end_transaction(dev, request);
/*
 * Reuse a just-failed ATAPI request to issue an automatic
 * REQUEST SENSE for its CCB.  The request structure is zeroed and
 * rebuilt to read the sense data into ccb->csio.sense_data;
 * ch->requestsense = 1 tells ata_cam_end_transaction() to route the
 * completion through ata_cam_process_sense().
 */
1548 ata_cam_request_sense(device_t dev, struct ata_request *request)
1550 struct ata_channel *ch = device_get_softc(dev);
1551 union ccb *ccb = request->ccb;
1553 ch->requestsense = 1;
1555 bzero(request, sizeof(*request));
1556 request->dev = NULL;
1557 request->parent = dev;
1558 request->unit = ccb->ccb_h.target_id;
/* Target buffer is the CCB's own sense area. */
1559 request->data = (void *)&ccb->csio.sense_data;
1560 request->bytecount = ccb->csio.sense_len;
/* Hand-built ATAPI REQUEST SENSE CDB; byte 4 = allocation length. */
1561 request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE;
1562 request->u.atapi.ccb[4] = ccb->csio.sense_len;
1563 request->flags |= ATA_R_ATAPI;
1564 if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
1565 request->flags |= ATA_R_ATAPI16;
1566 if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
1567 request->flags |= ATA_R_DMA;
1568 request->flags |= ATA_R_READ;
1569 request->transfersize = min(request->bytecount,
1570 ch->curr[ccb->ccb_h.target_id].bytecount);
1571 request->retries = 0;
1572 request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
1573 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
/* Re-arm the channel and start the sense transfer immediately. */
1576 ch->running = request;
1577 ch->state = ATA_ACTIVE;
1578 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
1580 ch->state = ATA_IDLE;
1581 ata_cam_end_transaction(dev, request);
/*
 * Complete an automatic REQUEST SENSE started by
 * ata_cam_request_sense(): clear the channel's requestsense flag,
 * mark the CCB's sense data valid if the sense command itself
 * succeeded (no timeout, no error status, zero result), otherwise
 * flag CAM_AUTOSENSE_FAIL, then free the request.
 */
1587 ata_cam_process_sense(device_t dev, struct ata_request *request)
1589 struct ata_channel *ch = device_get_softc(dev);
1590 union ccb *ccb = request->ccb;
1593 ch->requestsense = 0;
1595 if (request->flags & ATA_R_TIMEOUT)
1597 if ((request->flags & ATA_R_TIMEOUT) == 0 &&
1598 (request->status & ATA_S_ERROR) == 0 &&
1599 request->result == 0) {
1600 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
/* Sense fetch itself failed: report autosense failure instead. */
1602 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1603 ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
1606 ata_free_request(request);
1608 /* Do error recovery if needed. */
/*
 * Convert a finished ata_request back into CCB status for CAM:
 * map timeout/error/result codes to CAM status bits, freeze the
 * device queue on failure, copy result registers back for ATA
 * commands that need them, update residuals, and either start an
 * automatic REQUEST SENSE or free the request.
 */
1614 ata_cam_end_transaction(device_t dev, struct ata_request *request)
1616 struct ata_channel *ch = device_get_softc(dev);
1617 union ccb *ccb = request->ccb;
/* A completing auto-sense request takes the dedicated path. */
1620 if (ch->requestsense) {
1621 ata_cam_process_sense(dev, request);
1625 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1626 if (request->flags & ATA_R_TIMEOUT) {
/* Timeout: freeze the SIM queue until recovery releases it. */
1627 xpt_freeze_simq(ch->sim, 1);
1628 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1629 ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
1631 } else if (request->status & ATA_S_ERROR) {
1632 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1633 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
/* ATAPI error surfaces as SCSI CHECK CONDITION. */
1635 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1636 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1638 } else if (request->result == ERESTART)
1639 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1640 else if (request->result != 0)
1641 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1643 ccb->ccb_h.status |= CAM_REQ_CMP;
/* Any failure freezes the device queue exactly once. */
1644 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
1645 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1646 xpt_freeze_devq(ccb->ccb_h.path, 1);
1647 ccb->ccb_h.status |= CAM_DEV_QFRZN;
/* Copy taskfile result registers back when requested or on error. */
1649 if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1650 ((request->status & ATA_S_ERROR) ||
1651 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
1652 struct ata_res *res = &ccb->ataio.res;
1653 res->status = request->status;
1654 res->error = request->error;
1655 res->lba_low = request->u.ata.lba;
1656 res->lba_mid = request->u.ata.lba >> 8;
1657 res->lba_high = request->u.ata.lba >> 16;
1658 res->device = request->u.ata.lba >> 24;
1659 res->lba_low_exp = request->u.ata.lba >> 24;
1660 res->lba_mid_exp = request->u.ata.lba >> 32;
1661 res->lba_high_exp = request->u.ata.lba >> 40;
1662 res->sector_count = request->u.ata.count;
1663 res->sector_count_exp = request->u.ata.count >> 8;
/* Record the residual (requested minus actually transferred). */
1665 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1666 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1668 ccb->ataio.dxfer_len - request->donecount;
1671 ccb->csio.dxfer_len - request->donecount;
/* CHECK CONDITION with autosense enabled: fetch sense data now. */
1674 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
1675 (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
1676 ata_cam_request_sense(dev, request);
1678 ata_free_request(request);
1681 /* Do error recovery if needed. */
/*
 * Validate a CCB's target/lun against this channel: target must be 0
 * (or 0-1 when a slave device is allowed) and the lun must be 0.
 * On failure the CCB status is set (CAM_TID_INVALID/CAM_LUN_INVALID);
 * the elided lines presumably complete the CCB and return non-zero.
 */
1687 ata_check_ids(device_t dev, union ccb *ccb)
1689 struct ata_channel *ch = device_get_softc(dev);
1691 if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
1692 ccb->ccb_h.status = CAM_TID_INVALID;
1696 if (ccb->ccb_h.target_lun != 0) {
1697 ccb->ccb_h.status = CAM_LUN_INVALID;
/*
 * CAM SIM action entry point for the legacy ata(4) driver: dispatch
 * each CCB by func_code — start I/O, get/set transfer settings,
 * resets, and path inquiry.  Unsupported target-mode and abort
 * operations are rejected with CAM_REQ_INVALID.
 */
1705 ataaction(struct cam_sim *sim, union ccb *ccb)
1707 device_t dev, parent;
1708 struct ata_channel *ch;
1710 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
1711 ccb->ccb_h.func_code));
1713 ch = (struct ata_channel *)cam_sim_softc(sim);
1715 switch (ccb->ccb_h.func_code) {
1716 /* Common cases first */
1717 case XPT_ATA_IO: /* Execute the requested I/O operation */
1719 if (ata_check_ids(dev, ccb))
/* No device was detected at this target: selection timeout. */
1721 if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
1722 << ccb->ccb_h.target_id)) == 0) {
1723 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1727 device_printf(dev, "already running!\n");
/*
 * A soft-reset control CCB is answered synchronously with a
 * fabricated signature instead of being sent to the device.
 */
1728 if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1729 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
1730 (ccb->ataio.cmd.control & ATA_A_RESET)) {
1731 struct ata_res *res = &ccb->ataio.res;
1733 bzero(res, sizeof(*res));
1734 if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
/* 0x14/0xeb is the ATAPI device signature. */
1738 res->lba_high = 0xeb;
1739 res->lba_mid = 0x14;
1741 ccb->ccb_h.status = CAM_REQ_CMP;
1744 ata_cam_begin_transaction(dev, ccb);
1746 case XPT_EN_LUN: /* Enable LUN as a target */
1747 case XPT_TARGET_IO: /* Execute target I/O request */
1748 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
1749 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/
1750 case XPT_ABORT: /* Abort the specified CCB */
1752 ccb->ccb_h.status = CAM_REQ_INVALID;
1754 case XPT_SET_TRAN_SETTINGS:
1756 struct ccb_trans_settings *cts = &ccb->cts;
1757 struct ata_cam_device *d;
1759 if (ata_check_ids(dev, ccb))
/* Current settings apply immediately; user settings are stored. */
1761 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1762 d = &ch->curr[ccb->ccb_h.target_id];
1764 d = &ch->user[ccb->ccb_h.target_id];
1765 if (ch->flags & ATA_SATA) {
1766 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
1767 d->revision = cts->xport_specific.sata.revision;
1768 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
/* Current mode changes go through the controller's SETMODE. */
1769 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1770 d->mode = ATA_SETMODE(ch->dev,
1771 ccb->ccb_h.target_id,
1772 cts->xport_specific.sata.mode);
1774 d->mode = cts->xport_specific.sata.mode;
/* SATA per-interrupt byte count is capped at 8KiB. */
1776 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
1777 d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
1778 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
1779 d->atapi = cts->xport_specific.sata.atapi;
1780 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
1781 d->caps = cts->xport_specific.sata.caps;
1783 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
1784 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
1785 d->mode = ATA_SETMODE(ch->dev,
1786 ccb->ccb_h.target_id,
1787 cts->xport_specific.ata.mode);
1789 d->mode = cts->xport_specific.ata.mode;
1791 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
1792 d->bytecount = cts->xport_specific.ata.bytecount;
1793 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
1794 d->atapi = cts->xport_specific.ata.atapi;
1796 ccb->ccb_h.status = CAM_REQ_CMP;
1799 case XPT_GET_TRAN_SETTINGS:
1801 struct ccb_trans_settings *cts = &ccb->cts;
1802 struct ata_cam_device *d;
1804 if (ata_check_ids(dev, ccb))
1806 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
1807 d = &ch->curr[ccb->ccb_h.target_id];
1809 d = &ch->user[ccb->ccb_h.target_id];
1810 cts->protocol = PROTO_UNSPECIFIED;
1811 cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
1812 if (ch->flags & ATA_SATA) {
1813 cts->transport = XPORT_SATA;
1814 cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1815 cts->xport_specific.sata.valid = 0;
1816 cts->xport_specific.sata.mode = d->mode;
1817 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
1818 cts->xport_specific.sata.bytecount = d->bytecount;
1819 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
1820 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
/* Query live revision from the controller; 0xff means unknown. */
1821 cts->xport_specific.sata.revision =
1822 ATA_GETREV(dev, ccb->ccb_h.target_id);
1823 if (cts->xport_specific.sata.revision != 0xff) {
1824 cts->xport_specific.sata.valid |=
1825 CTS_SATA_VALID_REVISION;
1827 cts->xport_specific.sata.caps =
1828 d->caps & CTS_SATA_CAPS_D;
1830 cts->xport_specific.sata.caps |=
1831 CTS_SATA_CAPS_H_PMREQ;
/* Host caps are limited by the user-configured caps. */
1833 cts->xport_specific.sata.caps &=
1834 ch->user[ccb->ccb_h.target_id].caps;
1835 cts->xport_specific.sata.valid |=
1836 CTS_SATA_VALID_CAPS;
1838 cts->xport_specific.sata.revision = d->revision;
1839 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
1840 cts->xport_specific.sata.caps = d->caps;
1841 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
1843 cts->xport_specific.sata.atapi = d->atapi;
1844 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
1846 cts->transport = XPORT_ATA;
1847 cts->transport_version = XPORT_VERSION_UNSPECIFIED;
1848 cts->xport_specific.ata.valid = 0;
1849 cts->xport_specific.ata.mode = d->mode;
1850 cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
1851 cts->xport_specific.ata.bytecount = d->bytecount;
1852 cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
1853 cts->xport_specific.ata.atapi = d->atapi;
1854 cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
1856 ccb->ccb_h.status = CAM_REQ_CMP;
1859 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1860 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
1862 ccb->ccb_h.status = CAM_REQ_CMP;
1864 case XPT_TERM_IO: /* Terminate the I/O process */
1866 ccb->ccb_h.status = CAM_REQ_INVALID;
1868 case XPT_PATH_INQ: /* Path routing inquiry */
1870 struct ccb_pathinq *cpi = &ccb->cpi;
1872 parent = device_get_parent(dev);
1873 cpi->version_num = 1; /* XXX??? */
1874 cpi->hba_inquiry = PI_SDTR_ABLE;
1875 cpi->target_sprt = 0;
1876 cpi->hba_misc = PIM_SEQSCAN;
1877 cpi->hba_eng_cnt = 0;
1878 if (ch->flags & ATA_NO_SLAVE)
1879 cpi->max_target = 0;
1881 cpi->max_target = 1;
1883 cpi->initiator_id = 0;
1884 cpi->bus_id = cam_sim_bus(sim);
/* Base speed: SATA 1.5Gb/s (150000 KB/s) vs PIO0 (3300 KB/s). */
1885 if (ch->flags & ATA_SATA)
1886 cpi->base_transfer_speed = 150000;
1888 cpi->base_transfer_speed = 3300;
1889 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1890 strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
1891 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1892 cpi->unit_number = cam_sim_unit(sim);
1893 if (ch->flags & ATA_SATA)
1894 cpi->transport = XPORT_SATA;
1896 cpi->transport = XPORT_ATA;
1897 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
1898 cpi->protocol = PROTO_ATA;
1899 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
1900 cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
/* Report PCI IDs when the grandparent bus is PCI. */
1901 if (device_get_devclass(device_get_parent(parent)) ==
1902 devclass_find("pci")) {
1903 cpi->hba_vendor = pci_get_vendor(parent);
1904 cpi->hba_device = pci_get_device(parent);
1905 cpi->hba_subvendor = pci_get_subvendor(parent);
1906 cpi->hba_subdevice = pci_get_subdevice(parent);
1908 cpi->ccb_h.status = CAM_REQ_CMP;
1912 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * CAM poll entry point: service the channel interrupt handler
 * synchronously (used when interrupts are unavailable, e.g. dumping).
 */
1919 atapoll(struct cam_sim *sim)
1921 struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);
1923 ata_interrupt_locked(ch);
/*
 * Module load/unload handler: on load, create the /dev/ata control
 * node and schedule ata_boot_attach() via an intr_config_hook so it
 * runs once interrupts are enabled; on unload, destroy the control
 * node.  The 'what' switch labels are elided in this excerpt.
 */
1931 ata_module_event_handler(module_t mod, int what, void *arg)
1934 static struct cdev *atacdev;
1940 /* register controlling device */
1941 atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
1944 /* register boot attach to be run when interrupts are enabled */
1945 if (!(ata_delayed_attach = (struct intr_config_hook *)
1946 malloc(sizeof(struct intr_config_hook),
1947 M_TEMP, M_NOWAIT | M_ZERO))) {
1948 printf("ata: malloc of delayed attach hook failed\n");
1951 ata_delayed_attach->ich_func = (void*)ata_boot_attach;
1952 if (config_intrhook_establish(ata_delayed_attach) != 0) {
1953 printf("ata: config_intrhook_establish failed\n");
/* Hook registration failed: free it so unload stays clean. */
1954 free(ata_delayed_attach, M_TEMP);
1962 /* deregister controlling device */
1963 destroy_dev(atacdev);
/*
 * Register the "ata" kernel module (version 1) at SI_SUB_CONFIGURE
 * and declare its dependency on the CAM module.
 */
1972 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1973 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1974 MODULE_VERSION(ata, 1);
1976 MODULE_DEPEND(ata, cam, 1, 1, 1);
/*
 * Driver-wide UMA zone setup/teardown, wired to SYSINIT/SYSUNINIT at
 * SI_SUB_DRIVERS: ata_init() creates the request and composite zones,
 * ata_uninit() destroys them in reverse order.  The surrounding
 * function headers/braces are elided in this excerpt.
 */
1982 ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
1983 NULL, NULL, NULL, NULL, 0, 0);
1984 ata_composite_zone = uma_zcreate("ata_composite",
1985 sizeof(struct ata_composite),
1986 NULL, NULL, NULL, NULL, 0, 0);
1988 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
1993 uma_zdestroy(ata_composite_zone);
1994 uma_zdestroy(ata_request_zone);
1996 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);