2 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
36 #include <sys/sysctl.h>
37 #include <sys/taskqueue.h>
39 #include <sys/mutex.h>
41 #include <sys/devicestat.h>
42 #include <sys/eventhandler.h>
43 #include <sys/malloc.h>
45 #include <geom/geom_disk.h>
54 #include <cam/cam_ccb.h>
55 #include <cam/cam_periph.h>
56 #include <cam/cam_xpt_periph.h>
57 #include <cam/cam_sim.h>
59 #include <cam/ata/ata_all.h>
63 #define ATA_MAX_28BIT_LBA 268435455UL
/*
 * Per-device state bits kept in softc->flags (the enclosing enum/typedef
 * header is not visible in this chunk of the file).
 */
70 ADA_FLAG_PACK_INVALID = 0x001,
71 ADA_FLAG_CAN_48BIT = 0x002,
72 ADA_FLAG_CAN_FLUSHCACHE = 0x004,
73 ADA_FLAG_CAN_NCQ = 0x008,
74 ADA_FLAG_TAGGED_QUEUING = 0x010,
75 ADA_FLAG_NEED_OTAG = 0x020,
76 ADA_FLAG_WENT_IDLE = 0x040,
77 ADA_FLAG_RETRY_UA = 0x080,
78 ADA_FLAG_OPEN = 0x100,
79 ADA_FLAG_SCTX_INIT = 0x200
/*
 * Per-device quirk bits.  NOTE(review): the quirk table that would set
 * these is commented out later in the file, so they appear unused here.
 */
84 ADA_Q_NO_SYNC_CACHE = 0x01,
85 ADA_Q_NO_6_BYTE = 0x02,
86 ADA_Q_NO_PREVENT = 0x04
/*
 * CCB state codes stored in ccb_state: low nibble is the CCB type
 * (masked with ADA_CCB_TYPE_MASK), high bits are modifiers.
 */
91 ADA_CCB_PROBE2 = 0x02,
92 ADA_CCB_BUFFER_IO = 0x03,
93 ADA_CCB_WAITING = 0x04,
95 ADA_CCB_TYPE_MASK = 0x0F,
96 ADA_CCB_RETRY_UA = 0x10
99 /* Offsets into our private area for storing information */
100 #define ccb_state ppriv_field0
101 #define ccb_bp ppriv_ptr1
/* Geometry/capacity reported to disk(9); filled in by adasetgeom(). */
106 u_int8_t secs_per_track;
107 u_int32_t secsize; /* Number of bytes/sector */
108 u_int64_t sectors; /* total number sectors */
/* Queued bios, driver-list linkage, and CCBs currently at the SIM. */
112 struct bio_queue_head bio_queue;
113 SLIST_ENTRY(ada_softc) links;
114 LIST_HEAD(, ccb_hdr) pending_ccbs;
/* Ordered-tag bookkeeping used by adasendorderedtag()/adastart(). */
118 int ordered_tag_count;
119 int outstanding_cmds;
120 struct disk_params params;
/* Deferred sysctl-tree creation and the periodic ordered-tag callout. */
123 struct task sysctl_task;
124 struct sysctl_ctx_list sysctl_ctx;
125 struct sysctl_oid *sysctl_tree;
126 struct callout sendordered_c;
129 struct ada_quirk_entry {
130 struct scsi_inquiry_pattern inq_pat;
/*
 * NOTE(review): the quirk table itself is commented out, so quirk
 * matching is effectively disabled; see adaregister().
 */
134 //static struct ada_quirk_entry ada_quirk_table[] =
/*
 * Forward declarations for the ada(4) peripheral driver entry points
 * (disk(9) methods plus CAM periph constructor/destructor/start hooks).
 */
138 static disk_strategy_t adastrategy;
139 static dumper_t adadump;
140 static periph_init_t adainit;
141 static void adaasync(void *callback_arg, u_int32_t code,
142 struct cam_path *path, void *arg);
143 static void adasysctlinit(void *context, int pending);
144 static periph_ctor_t adaregister;
145 static periph_dtor_t adacleanup;
146 static periph_start_t adastart;
147 static periph_oninv_t adaoninvalidate;
148 static void adadone(struct cam_periph *periph,
149 union ccb *done_ccb);
150 static int adaerror(union ccb *ccb, u_int32_t cam_flags,
151 u_int32_t sense_flags);
152 static void adasetgeom(struct cam_periph *periph,
153 struct ccb_getdev *cgd);
154 static timeout_t adasendorderedtag;
155 static void adashutdown(void *arg, int howto);
/* Compile-time defaults, overridable from the kernel config. */
157 #ifndef ADA_DEFAULT_TIMEOUT
158 #define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */
161 #ifndef ADA_DEFAULT_RETRY
162 #define ADA_DEFAULT_RETRY 4
165 #ifndef ADA_DEFAULT_SEND_ORDERED
166 #define ADA_DEFAULT_SEND_ORDERED 1
/* Runtime knobs: each is both a loader tunable and a sysctl. */
170 static int ada_retry_count = ADA_DEFAULT_RETRY;
171 static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
172 static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
174 SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
175 "CAM Direct Access Disk driver");
176 SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
177 &ada_retry_count, 0, "Normal I/O retry count");
178 TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
179 SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
180 &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
181 TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
182 SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
183 &ada_send_ordered, 0, "Send Ordered Tags");
184 TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);
187 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
188 * to the default timeout, we check to see whether an ordered
189 * tagged transaction is appropriate to prevent simple tag
190 * starvation. Since we'd like to ensure that there is at least
191 * 1/2 of the timeout length left for a starved transaction to
192 * complete after we've sent an ordered tag, we must poll at least
193 * four times in every timeout period. This takes care of the worst
194 * case where a starved transaction starts during an interval that
195 * meets the requirement "don't send an ordered tag" test so it takes
196 * us two intervals to determine that a tag must be sent.
198 #ifndef ADA_ORDEREDTAG_INTERVAL
199 #define ADA_ORDEREDTAG_INTERVAL 4
/* Registration of the "ada" peripheral driver with CAM. */
202 static struct periph_driver adadriver =
205 TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
208 PERIPHDRIVER_DECLARE(ada, adadriver);
/*
 * NOTE(review): M_ATADA is defined here but adacleanup() frees the
 * softc with M_DEVBUF -- confirm the intended malloc type.
 */
210 MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
/*
 * disk(9) open handler: take a reference on the periph, hold it across
 * the open, and mark the unit open.  (Some lines -- e.g. the ENXIO and
 * success return paths -- are not visible in this chunk of the file.)
 */
213 adaopen(struct disk *dp)
215 struct cam_periph *periph;
216 struct ada_softc *softc;
220 periph = (struct cam_periph *)dp->d_drv1;
221 if (periph == NULL) {
/* Reference the periph so it cannot be freed while the disk is open. */
225 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
229 cam_periph_lock(periph);
/* PRIBIO|PCATCH: the hold sleep may be interrupted by a signal. */
230 if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
231 cam_periph_unlock(periph);
232 cam_periph_release(periph);
236 unit = periph->unit_number;
237 softc = (struct ada_softc *)periph->softc;
238 softc->flags |= ADA_FLAG_OPEN;
240 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
241 ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
/* A fresh open clears any stale media-invalid state. */
244 if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
245 /* Invalidate our pack information. */
246 softc->flags &= ~ADA_FLAG_PACK_INVALID;
249 cam_periph_unhold(periph);
250 cam_periph_unlock(periph);
/*
 * disk(9) close handler: synchronously flush the drive's write cache
 * (when the drive advertises FLUSH CACHE support) before clearing the
 * open flag and dropping the hold/reference taken in adaopen().
 */
255 adaclose(struct disk *dp)
257 struct cam_periph *periph;
258 struct ada_softc *softc;
262 periph = (struct cam_periph *)dp->d_drv1;
266 cam_periph_lock(periph);
267 if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
268 cam_periph_unlock(periph);
269 cam_periph_release(periph);
273 softc = (struct ada_softc *)periph->softc;
274 /* We only sync the cache if the drive is capable of it. */
275 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
277 ccb = cam_periph_getccb(periph, /*priority*/1);
278 cam_fill_ataio(&ccb->ataio,
285 ada_default_timeout*1000);
287 if (softc->flags & ADA_FLAG_CAN_48BIT)
288 ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
/*
 * NOTE(review): the non-48-bit path builds the 28-bit FLUSH CACHE via
 * ata_48bit_cmd(); the 28-bit I/O paths elsewhere in this file use
 * ata_36bit_cmd() -- confirm which fill helper is intended here.
 */
290 ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
291 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
292 /*sense_flags*/SF_RETRY_UA,
293 softc->disk->d_devstat);
295 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
296 xpt_print(periph->path, "Synchronize cache failed\n");
/* Undo any device-queue freeze left behind by the flush. */
298 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
299 cam_release_devq(ccb->ccb_h.path,
304 xpt_release_ccb(ccb);
307 softc->flags &= ~ADA_FLAG_OPEN;
308 cam_periph_unhold(periph);
309 cam_periph_unlock(periph);
310 cam_periph_release(periph);
315 * Actually translate the requested transfer into one the physical driver
316 * can understand. The transfer is described by a buf and will include
317 * only one physical transfer.
320 adastrategy(struct bio *bp)
322 struct cam_periph *periph;
323 struct ada_softc *softc;
325 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
326 if (periph == NULL) {
327 biofinish(bp, NULL, ENXIO);
330 softc = (struct ada_softc *)periph->softc;
332 cam_periph_lock(periph);
336 * check it's not too big a transfer for our adapter
338 scsi_minphys(bp,&sd_switch);
342 * Mask interrupts so that the pack cannot be invalidated until
343 * after we are in the queue. Otherwise, we might not properly
344 * clean up one of the buffers.
348 * If the device has been made invalid, error out
350 if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
351 cam_periph_unlock(periph);
352 biofinish(bp, NULL, ENXIO);
357 * Place it in the queue of disk activities for this disk
359 bioq_disksort(&softc->bio_queue, bp);
362 * Schedule ourselves for performing the work.
364 xpt_schedule(periph, /* XXX priority */1);
365 cam_periph_unlock(periph);
/*
 * disk(9) dump handler: polled WRITE DMA of a crash-dump buffer, then a
 * polled cache flush on the length==0 "end of dump" call.  Runs with
 * interrupts unusable, hence xpt_polled_action() and CAM_PERIPH_POLLED.
 * (Several lines are not visible in this chunk of the file.)
 */
371 adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
373 struct cam_periph *periph;
374 struct ada_softc *softc;
385 softc = (struct ada_softc *)periph->softc;
386 cam_periph_lock(periph);
387 secsize = softc->params.secsize;
388 lba = offset / secsize;
389 count = length / secsize;
391 if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
392 cam_periph_unlock(periph);
397 periph->flags |= CAM_PERIPH_POLLED;
398 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
399 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
400 cam_fill_ataio(&ccb.ataio,
405 (u_int8_t *) virtual,
407 ada_default_timeout*1000);
/* Use the 48-bit command only when the transfer needs it. */
408 if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
409 (lba + count >= ATA_MAX_28BIT_LBA ||
411 ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
414 ata_36bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
417 xpt_polled_action(&ccb);
419 if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
420 printf("Aborting dump due to I/O error.\n");
421 cam_periph_unlock(periph);
424 cam_periph_unlock(periph);
/* length == 0 signals end of dump: flush the drive's write cache. */
428 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
429 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
431 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
432 cam_fill_ataio(&ccb.ataio,
439 ada_default_timeout*1000);
441 if (softc->flags & ADA_FLAG_CAN_48BIT)
442 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
/* NOTE(review): 28-bit FLUSH CACHE built with ata_48bit_cmd(); other
 * 28-bit call sites use ata_36bit_cmd() -- confirm intent. */
444 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
445 xpt_polled_action(&ccb);
447 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
448 xpt_print(periph->path, "Synchronize cache failed\n");
450 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
451 cam_release_devq(ccb.ccb_h.path,
457 periph->flags &= ~CAM_PERIPH_POLLED;
458 cam_periph_unlock(periph);
/*
 * Body of adainit() -- the function header is not visible in this chunk.
 * Hooks the driver into CAM device discovery and system shutdown.
 */
468 * Install a global async callback. This callback will
469 * receive async callbacks like "new device found".
471 status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
473 if (status != CAM_REQ_CMP) {
474 printf("ada: Failed to attach master async callback "
475 "due to status 0x%x!\n", status);
476 } else if (ada_send_ordered) {
478 /* Register our shutdown event handler */
479 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
480 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
481 printf("adainit: shutdown event registration failed!\n");
/*
 * Called when the underlying device goes away: stop async callbacks,
 * mark the pack invalid, fail all queued bios, and retire the disk.
 */
486 adaoninvalidate(struct cam_periph *periph)
488 struct ada_softc *softc;
490 softc = (struct ada_softc *)periph->softc;
493 * De-register any async callbacks.
495 xpt_register_async(0, adaasync, periph, periph->path);
497 softc->flags |= ADA_FLAG_PACK_INVALID;
500 * Return all queued I/O with ENXIO.
501 * XXX Handle any transactions queued to the card
502 * with XPT_ABORT_CCB.
504 bioq_flush(&softc->bio_queue, NULL, ENXIO);
506 disk_gone(softc->disk);
507 xpt_print(periph->path, "lost device\n");
/*
 * Final periph teardown: free the sysctl tree, destroy the disk(9)
 * object, drain the ordered-tag callout, and free the softc.  The SIM
 * lock is dropped around the teardown and re-taken before returning.
 */
511 adacleanup(struct cam_periph *periph)
513 struct ada_softc *softc;
515 softc = (struct ada_softc *)periph->softc;
517 xpt_print(periph->path, "removing device entry\n");
518 cam_periph_unlock(periph);
521 * If we can't free the sysctl tree, oh well...
523 if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
524 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
525 xpt_print(periph->path, "can't remove sysctl context\n");
528 disk_destroy(softc->disk);
529 callout_drain(&softc->sendordered_c);
/* NOTE(review): freed as M_DEVBUF although M_ATADA is defined above --
 * confirm the allocation in adaregister() uses the same type. */
530 free(softc, M_DEVBUF);
531 cam_periph_lock(periph);
/*
 * Async event callback.  On AC_FOUND_DEVICE, allocate a new ada periph
 * for ATA-protocol devices; on reset-type events (switch labels not
 * visible in this chunk), flag outstanding and future commands to
 * tolerate the expected unit attention.
 */
535 adaasync(void *callback_arg, u_int32_t code,
536 struct cam_path *path, void *arg)
538 struct cam_periph *periph;
540 periph = (struct cam_periph *)callback_arg;
542 case AC_FOUND_DEVICE:
544 struct ccb_getdev *cgd;
547 cgd = (struct ccb_getdev *)arg;
/* Only ATA-protocol devices are ours. */
551 if (cgd->protocol != PROTO_ATA)
554 // if (SID_TYPE(&cgd->inq_data) != T_DIRECT
555 // && SID_TYPE(&cgd->inq_data) != T_RBC
556 // && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
560 * Allocate a peripheral instance for
561 * this device and start the probe
564 status = cam_periph_alloc(adaregister, adaoninvalidate,
565 adacleanup, adastart,
566 "ada", CAM_PERIPH_BIO,
567 cgd->ccb_h.path, adaasync,
568 AC_FOUND_DEVICE, cgd);
570 if (status != CAM_REQ_CMP
571 && status != CAM_REQ_INPROG)
572 printf("adaasync: Unable to attach to new device "
573 "due to status 0x%x\n", status);
579 struct ada_softc *softc;
580 struct ccb_hdr *ccbh;
582 softc = (struct ada_softc *)periph->softc;
584 * Don't fail on the expected unit attention
587 softc->flags |= ADA_FLAG_RETRY_UA;
588 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
589 ccbh->ccb_state |= ADA_CCB_RETRY_UA;
/* Everything else is handled by the generic periph async code. */
593 cam_periph_async(periph, code, path, arg);
/*
 * Taskqueue handler (queued from adaregister()): build this unit's
 * kern.cam.ada.<unit> sysctl subtree.  Runs without the SIM lock, so it
 * takes its own periph reference for the duration.
 */
599 adasysctlinit(void *context, int pending)
601 struct cam_periph *periph;
602 struct ada_softc *softc;
603 char tmpstr[80], tmpstr2[80];
605 periph = (struct cam_periph *)context;
606 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
609 softc = (struct ada_softc *)periph->softc;
610 snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
611 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
613 sysctl_ctx_init(&softc->sysctl_ctx);
614 softc->flags |= ADA_FLAG_SCTX_INIT;
615 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
616 SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
617 CTLFLAG_RD, 0, tmpstr);
618 if (softc->sysctl_tree == NULL) {
619 printf("adasysctlinit: unable to allocate sysctl tree\n");
620 cam_periph_release(periph);
624 cam_periph_release(periph);
/*
 * Periph constructor: allocate and initialize the softc, derive
 * capability flags from the IDENTIFY data, create and announce the
 * disk(9) object, and start the periodic ordered-tag callout.
 * (Several lines are not visible in this chunk of the file.)
 */
628 adaregister(struct cam_periph *periph, void *arg)
630 struct ada_softc *softc;
631 struct ccb_pathinq cpi;
632 struct ccb_getdev *cgd;
633 char announce_buf[80];
634 struct disk_params *dp;
638 cgd = (struct ccb_getdev *)arg;
639 if (periph == NULL) {
640 printf("adaregister: periph was NULL!!\n");
641 return(CAM_REQ_CMP_ERR);
645 printf("adaregister: no getdev CCB, can't register device\n");
646 return(CAM_REQ_CMP_ERR);
649 softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
653 printf("adaregister: Unable to probe new device. "
654 "Unable to allocate softc\n");
655 return(CAM_REQ_CMP_ERR);
658 LIST_INIT(&softc->pending_ccbs);
659 softc->state = ADA_STATE_NORMAL;
660 bioq_init(&softc->bio_queue);
/* Capability flags derived from the drive's IDENTIFY data. */
662 if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
663 softc->flags |= ADA_FLAG_CAN_48BIT;
664 if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
665 softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
666 if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
667 cgd->ident_data.queue >= 31)
668 softc->flags |= ADA_FLAG_CAN_NCQ;
669 // if ((cgd->inq_data.flags & SID_CmdQue) != 0)
670 // softc->flags |= ADA_FLAG_TAGGED_QUEUING;
672 periph->softc = softc;
675 * See if this device has any quirks.
677 // match = cam_quirkmatch((caddr_t)&cgd->inq_data,
678 // (caddr_t)ada_quirk_table,
679 // sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
680 // sizeof(*ada_quirk_table), scsi_inquiry_match);
684 softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
686 softc->quirks = ADA_Q_NONE;
688 /* Check if the SIM does not want queued commands */
689 bzero(&cpi, sizeof(cpi));
690 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
691 cpi.ccb_h.func_code = XPT_PATH_INQ;
692 xpt_action((union ccb *)&cpi);
693 if (cpi.ccb_h.status != CAM_REQ_CMP ||
694 (cpi.hba_inquiry & PI_TAG_ABLE) == 0)
695 softc->flags &= ~ADA_FLAG_CAN_NCQ;
/* Sysctl-tree creation is deferred to a taskqueue (needs no SIM lock). */
697 TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
700 * Register this media as a disk
/* disk_create() may sleep; drop the SIM lock around it. */
702 mtx_unlock(periph->sim->mtx);
703 softc->disk = disk_alloc();
704 softc->disk->d_open = adaopen;
705 softc->disk->d_close = adaclose;
706 softc->disk->d_strategy = adastrategy;
707 softc->disk->d_dump = adadump;
708 softc->disk->d_name = "ada";
709 softc->disk->d_drv1 = periph;
710 maxio = cpi.maxio; /* Honor max I/O size of SIM */
712 maxio = DFLTPHYS; /* traditional default */
713 else if (maxio > MAXPHYS)
714 maxio = MAXPHYS; /* for safety */
/* Clamp to the per-command sector-count limit of the command set. */
715 if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
716 maxio = min(maxio, 65535 * 512);
717 else /* 28bit ATA command limit */
718 maxio = min(maxio, 255 * 512);
719 softc->disk->d_maxsize = maxio;
720 softc->disk->d_unit = periph->unit_number;
721 softc->disk->d_flags = 0;
722 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
723 softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
725 adasetgeom(periph, cgd);
726 softc->disk->d_sectorsize = softc->params.secsize;
727 softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
728 /* XXX: these are not actually "firmware" values, so they may be wrong */
729 softc->disk->d_fwsectors = softc->params.secs_per_track;
730 softc->disk->d_fwheads = softc->params.heads;
731 // softc->disk->d_devstat->block_size = softc->params.secsize;
732 // softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
734 disk_create(softc->disk, DISK_VERSION);
735 mtx_lock(periph->sim->mtx);
738 snprintf(announce_buf, sizeof(announce_buf),
739 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
740 (uintmax_t)(((uintmax_t)dp->secsize *
741 dp->sectors) / (1024*1024)),
742 (uintmax_t)dp->sectors,
743 dp->secsize, dp->heads,
744 dp->secs_per_track, dp->cylinders);
745 xpt_announce_periph(periph, announce_buf);
746 if (softc->flags & ADA_FLAG_CAN_NCQ) {
747 printf("%s%d: Native Command Queueing enabled\n",
748 periph->periph_name, periph->unit_number);
752 * Add async callbacks for bus reset and
753 * bus device reset calls. I don't bother
754 * checking if this fails as, in most cases,
755 * the system will function just fine without
756 * them and the only alternative would be to
757 * not attach the device on failure.
759 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
760 adaasync, periph, periph->path);
763 * Take an exclusive refcount on the periph while adastart is called
764 * to finish the probe. The reference will be dropped in adadone at
767 // (void)cam_periph_hold(periph, PRIBIO);
768 // xpt_schedule(periph, /*priority*/5);
771 * Schedule a periodic event to occasionally send an
772 * ordered tag to a device.
774 callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
775 callout_reset(&softc->sendordered_c,
776 (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
777 adasendorderedtag, softc);
/*
 * Periph start routine: pull the next bio off the queue and translate
 * it into an ATA I/O CCB (NCQ, 48-bit, or 28-bit DMA for read/write;
 * FLUSH CACHE for BIO_FLUSH), then hand it to the transport.
 * (Several lines are not visible in this chunk of the file.)
 */
783 adastart(struct cam_periph *periph, union ccb *start_ccb)
785 struct ada_softc *softc;
787 softc = (struct ada_softc *)periph->softc;
789 switch (softc->state) {
790 case ADA_STATE_NORMAL:
792 /* Pull a buffer from the queue and get going on it */
796 * See if there is a buf with work for us to do..
798 bp = bioq_first(&softc->bio_queue);
/* An immediate (waiting) CCB request takes priority over bio work. */
799 if (periph->immediate_priority <= periph->pinfo.priority) {
800 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
801 ("queuing for immediate ccb\n"));
802 start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
803 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
805 periph->immediate_priority = CAM_PRIORITY_NONE;
806 wakeup(&periph->ccb_list);
807 } else if (bp == NULL) {
808 xpt_release_ccb(start_ccb);
810 struct ccb_ataio *ataio = &start_ccb->ataio;
813 bioq_remove(&softc->bio_queue, bp);
/* Periodically promote a command to an ordered tag (see
 * adasendorderedtag()); tag codes are currently stubbed to 0. */
815 if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
816 softc->flags &= ~ADA_FLAG_NEED_OTAG;
817 softc->ordered_tag_count++;
818 tag_code = 0;//MSG_ORDERED_Q_TAG;
820 tag_code = 0;//MSG_SIMPLE_Q_TAG;
822 switch (bp->bio_cmd) {
826 uint64_t lba = bp->bio_pblkno;
827 uint16_t count = bp->bio_bcount / softc->params.secsize;
829 cam_fill_ataio(ataio,
832 bp->bio_cmd == BIO_READ ?
833 CAM_DIR_IN : CAM_DIR_OUT,
837 ada_default_timeout*1000);
/* Command selection: NCQ > 48-bit DMA (when required) > 28-bit DMA. */
839 if (softc->flags & ADA_FLAG_CAN_NCQ) {
840 if (bp->bio_cmd == BIO_READ) {
841 ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
844 ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
847 } else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
848 (lba + count >= ATA_MAX_28BIT_LBA ||
850 if (bp->bio_cmd == BIO_READ) {
851 ata_48bit_cmd(ataio, ATA_READ_DMA48,
854 ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
858 if (bp->bio_cmd == BIO_READ) {
859 ata_36bit_cmd(ataio, ATA_READ_DMA,
862 ata_36bit_cmd(ataio, ATA_WRITE_DMA,
869 cam_fill_ataio(ataio,
876 ada_default_timeout*1000);
878 if (softc->flags & ADA_FLAG_CAN_48BIT)
879 ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
/* NOTE(review): 28-bit FLUSH CACHE built with ata_48bit_cmd(); the
 * 28-bit read/write paths above use ata_36bit_cmd() -- confirm. */
881 ata_48bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
884 start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
887 * Block out any asyncronous callbacks
888 * while we touch the pending ccb list.
890 LIST_INSERT_HEAD(&softc->pending_ccbs,
891 &start_ccb->ccb_h, periph_links.le);
892 softc->outstanding_cmds++;
894 /* We expect a unit attention from this device */
895 if ((softc->flags & ADA_FLAG_RETRY_UA) != 0) {
896 start_ccb->ccb_h.ccb_state |= ADA_CCB_RETRY_UA;
897 softc->flags &= ~ADA_FLAG_RETRY_UA;
900 start_ccb->ccb_h.ccb_bp = bp;
901 bp = bioq_first(&softc->bio_queue);
903 xpt_action(start_ccb);
907 /* Have more work to do, so ensure we stay scheduled */
908 xpt_schedule(periph, /* XXX priority */1);
/*
 * Completion handler: finish the bio attached to the CCB, run error
 * recovery via adaerror() on failure, and maintain the pending-CCB
 * list / outstanding-command count.
 * (Several lines are not visible in this chunk of the file.)
 */
916 adadone(struct cam_periph *periph, union ccb *done_ccb)
918 struct ada_softc *softc;
919 struct ccb_ataio *ataio;
921 softc = (struct ada_softc *)periph->softc;
922 ataio = &done_ccb->ataio;
923 switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
924 case ADA_CCB_BUFFER_IO:
928 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
929 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
932 error = adaerror(done_ccb, CAM_RETRY_SELTO, 0);
933 if (error == ERESTART) {
935 * A retry was scheuled, so
942 if (error == ENXIO) {
944 * Catastrophic error. Mark our pack as
948 * XXX See if this is really a media
951 xpt_print(periph->path,
952 "Invalidating pack\n");
953 softc->flags |= ADA_FLAG_PACK_INVALID;
957 * return all queued I/O with EIO, so that
958 * the client can retry these I/Os in the
959 * proper order should it attempt to recover.
961 bioq_flush(&softc->bio_queue, NULL, EIO);
962 bp->bio_error = error;
963 bp->bio_resid = bp->bio_bcount;
964 bp->bio_flags |= BIO_ERROR;
966 bp->bio_resid = ataio->resid;
968 if (bp->bio_resid != 0)
969 bp->bio_flags |= BIO_ERROR;
/* Thaw the device queue if error recovery froze it. */
971 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
972 cam_release_devq(done_ccb->ccb_h.path,
978 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
979 panic("REQ_CMP with QFRZN");
980 bp->bio_resid = ataio->resid;
981 if (ataio->resid > 0)
982 bp->bio_flags |= BIO_ERROR;
986 * Block out any asyncronous callbacks
987 * while we touch the pending ccb list.
989 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
990 softc->outstanding_cmds--;
991 if (softc->outstanding_cmds == 0)
992 softc->flags |= ADA_FLAG_WENT_IDLE;
997 case ADA_CCB_WAITING:
999 /* Caller will release the CCB */
1000 wakeup(&done_ccb->ccb_h.cbfcnp);
1004 /* No-op. We're polling */
1009 xpt_release_ccb(done_ccb);
1013 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1015 struct ada_softc *softc;
1016 struct cam_periph *periph;
1018 periph = xpt_path_periph(ccb->ccb_h.path);
1019 softc = (struct ada_softc *)periph->softc;
1021 return(cam_periph_error(ccb, cam_flags, sense_flags,
1022 &softc->saved_ccb));
1026 adasetgeom(struct cam_periph *periph, struct ccb_getdev *cgd)
1028 struct ada_softc *softc = (struct ada_softc *)periph->softc;
1029 struct disk_params *dp = &softc->params;
1030 u_int64_t lbasize48;
1034 if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1035 cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1036 dp->heads = cgd->ident_data.current_heads;
1037 dp->secs_per_track = cgd->ident_data.current_sectors;
1038 dp->cylinders = cgd->ident_data.cylinders;
1039 dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1040 ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1042 dp->heads = cgd->ident_data.heads;
1043 dp->secs_per_track = cgd->ident_data.sectors;
1044 dp->cylinders = cgd->ident_data.cylinders;
1045 dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
1047 lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1048 ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1050 /* does this device need oldstyle CHS addressing */
1051 // if (!ad_version(cgd->ident_data.version_major) || !lbasize)
1052 // atadev->flags |= ATA_D_USE_CHS;
1054 /* use the 28bit LBA size if valid or bigger than the CHS mapping */
1055 if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
1056 dp->sectors = lbasize;
1058 /* use the 48bit LBA size if valid */
1059 lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1060 ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1061 ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1062 ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1063 if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1064 lbasize48 > ATA_MAX_28BIT_LBA)
1065 dp->sectors = lbasize48;
1069 adasendorderedtag(void *arg)
1071 struct ada_softc *softc = arg;
1073 if (ada_send_ordered) {
1074 if ((softc->ordered_tag_count == 0)
1075 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1076 softc->flags |= ADA_FLAG_NEED_OTAG;
1078 if (softc->outstanding_cmds > 0)
1079 softc->flags &= ~ADA_FLAG_WENT_IDLE;
1081 softc->ordered_tag_count = 0;
1083 /* Queue us up again */
1084 callout_reset(&softc->sendordered_c,
1085 (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
1086 adasendorderedtag, softc);
1090 * Step through all ADA peripheral drivers, and if the device is still open,
1091 * sync the disk cache to physical media.
/*
 * shutdown_post_sync handler: walk every ada unit and issue a polled
 * cache flush for drives that are still open and support FLUSH CACHE.
 * (Several lines are not visible in this chunk of the file.)
 */
1094 adashutdown(void * arg, int howto)
1096 struct cam_periph *periph;
1097 struct ada_softc *softc;
1099 TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1102 cam_periph_lock(periph);
1103 softc = (struct ada_softc *)periph->softc;
1105 * We only sync the cache if the drive is still open, and
1106 * if the drive is capable of it..
1108 if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
1109 (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
1110 cam_periph_unlock(periph);
1114 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1116 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1117 cam_fill_ataio(&ccb.ataio,
1124 ada_default_timeout*1000);
1126 if (softc->flags & ADA_FLAG_CAN_48BIT)
1127 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
/* NOTE(review): 28-bit FLUSH CACHE built with ata_48bit_cmd(); other
 * 28-bit call sites in this file use ata_36bit_cmd() -- confirm. */
1129 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
/* Interrupts may be unusable at shutdown: poll for completion. */
1130 xpt_polled_action(&ccb);
1132 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1133 xpt_print(periph->path, "Synchronize cache failed\n");
1135 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1136 cam_release_devq(ccb.ccb_h.path,
1140 /*getcount_only*/0);
1141 cam_periph_unlock(periph);
1145 #endif /* _KERNEL */