2 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
36 #include <sys/sysctl.h>
37 #include <sys/taskqueue.h>
39 #include <sys/mutex.h>
41 #include <sys/devicestat.h>
42 #include <sys/eventhandler.h>
43 #include <sys/malloc.h>
45 #include <geom/geom_disk.h>
54 #include <cam/cam_ccb.h>
55 #include <cam/cam_periph.h>
56 #include <cam/cam_xpt_periph.h>
57 #include <cam/cam_sim.h>
59 #include <cam/ata/ata_all.h>
/* Largest LBA addressable with a 28-bit ATA command (2^28 - 1 sectors). */
63 #define ATA_MAX_28BIT_LBA 268435455UL
/*
 * Per-instance softc flags (enum fragment; the enum header itself is not
 * visible in this extraction — NOTE(review): confirm against full source).
 */
70 ADA_FLAG_PACK_INVALID = 0x001,
71 ADA_FLAG_CAN_48BIT = 0x002,
72 ADA_FLAG_CAN_FLUSHCACHE = 0x004,
73 ADA_FLAG_CAN_NCQ = 0x008,
74 ADA_FLAG_TAGGED_QUEUING = 0x010,
75 ADA_FLAG_NEED_OTAG = 0x020,
76 ADA_FLAG_WENT_IDLE = 0x040,
77 ADA_FLAG_RETRY_UA = 0x080,
78 ADA_FLAG_OPEN = 0x100,
79 ADA_FLAG_SCTX_INIT = 0x200
/* Device quirk bits (fragment; selected by the quirk table, see below). */
84 ADA_Q_NO_SYNC_CACHE = 0x01,
85 ADA_Q_NO_6_BYTE = 0x02,
86 ADA_Q_NO_PREVENT = 0x04
/*
 * CCB state values stored in ccb_state (fragment).  The low nibble is the
 * CCB type (masked with ADA_CCB_TYPE_MASK); ADA_CCB_RETRY_UA is OR'ed in
 * as a modifier bit.
 */
91 ADA_CCB_PROBE2 = 0x02,
92 ADA_CCB_BUFFER_IO = 0x03,
93 ADA_CCB_WAITING = 0x04,
95 ADA_CCB_TYPE_MASK = 0x0F,
96 ADA_CCB_RETRY_UA = 0x10
99 /* Offsets into our private area for storing information */
100 #define ccb_state ppriv_field0
101 #define ccb_bp ppriv_ptr1
/*
 * disk_params / ada_softc field fragment (struct headers not visible in
 * this extraction).  Geometry fields are filled in by adasetgeom().
 */
106 u_int8_t secs_per_track;
107 u_int32_t secsize; /* Number of bytes/sector */
108 u_int64_t sectors; /* total number sectors */
/* Queue of pending bios, periph linkage, and in-flight CCB list. */
112 struct bio_queue_head bio_queue;
113 SLIST_ENTRY(ada_softc) links;
114 LIST_HEAD(, ccb_hdr) pending_ccbs;
/* Ordered-tag bookkeeping used by adasendorderedtag()/adastart(). */
118 int ordered_tag_count;
119 int outstanding_cmds;
120 struct disk_params params;
/* Deferred sysctl setup (adasysctlinit) and periodic ordered-tag callout. */
123 struct task sysctl_task;
124 struct sysctl_ctx_list sysctl_ctx;
125 struct sysctl_oid *sysctl_tree;
126 struct callout sendordered_c;
/* Quirk table entry (fragment); the table itself is commented out below. */
129 struct ada_quirk_entry {
130 struct scsi_inquiry_pattern inq_pat;
134 //static struct ada_quirk_entry ada_quirk_table[] =
/*
 * Forward declarations: GEOM disk methods (strategy/dump), CAM periph
 * driver methods (init/register/cleanup/start/oninvalidate), and the
 * async, done, error, geometry, ordered-tag, and shutdown helpers.
 */
138 static disk_strategy_t adastrategy;
139 static dumper_t adadump;
140 static periph_init_t adainit;
141 static void adaasync(void *callback_arg, u_int32_t code,
142 struct cam_path *path, void *arg);
143 static void adasysctlinit(void *context, int pending);
144 static periph_ctor_t adaregister;
145 static periph_dtor_t adacleanup;
146 static periph_start_t adastart;
147 static periph_oninv_t adaoninvalidate;
148 static void adadone(struct cam_periph *periph,
149 union ccb *done_ccb);
150 static int adaerror(union ccb *ccb, u_int32_t cam_flags,
151 u_int32_t sense_flags);
152 static void adasetgeom(struct cam_periph *periph,
153 struct ccb_getdev *cgd);
154 static timeout_t adasendorderedtag;
155 static void adashutdown(void *arg, int howto);
/* Compile-time overridable defaults; runtime-tunable via sysctl/tunable. */
157 #ifndef ADA_DEFAULT_TIMEOUT
158 #define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */
161 #ifndef ADA_DEFAULT_RETRY
162 #define ADA_DEFAULT_RETRY 4
165 #ifndef ADA_DEFAULT_SEND_ORDERED
166 #define ADA_DEFAULT_SEND_ORDERED 1
170 static int ada_retry_count = ADA_DEFAULT_RETRY;
171 static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
172 static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
/* kern.cam.ada.* sysctl tree plus matching loader tunables. */
174 SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
175 "CAM Direct Access Disk driver");
176 SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
177 &ada_retry_count, 0, "Normal I/O retry count");
178 TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
179 SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
180 &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
181 TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
182 SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
183 &ada_send_ordered, 0, "Send Ordered Tags");
184 TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);
187 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
188 * to the default timeout, we check to see whether an ordered
189 * tagged transaction is appropriate to prevent simple tag
190 * starvation. Since we'd like to ensure that there is at least
191 * 1/2 of the timeout length left for a starved transaction to
192 * complete after we've sent an ordered tag, we must poll at least
193 * four times in every timeout period. This takes care of the worst
194 * case where a starved transaction starts during an interval that
195 * meets the requirement "don't send an ordered tag" test so it takes
196 * us two intervals to determine that a tag must be sent.
198 #ifndef ADA_ORDEREDTAG_INTERVAL
199 #define ADA_ORDEREDTAG_INTERVAL 4
/* Periph driver registration: unit list head plus generation count. */
202 static struct periph_driver adadriver =
205 TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
208 PERIPHDRIVER_DECLARE(ada, adadriver);
210 MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
/*
 * adaopen -- GEOM disk open method.  Acquires and holds the periph,
 * marks the unit open, and clears any stale pack-invalid state.
 * NOTE(review): extraction gaps — the ENXIO returns for the NULL-periph
 * and acquire-failure branches, and closing braces, are not visible here.
 */
213 adaopen(struct disk *dp)
215 struct cam_periph *periph;
216 struct ada_softc *softc;
220 periph = (struct cam_periph *)dp->d_drv1;
221 if (periph == NULL) {
225 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
229 cam_periph_lock(periph);
/* PCATCH: a signal may interrupt the wait for the periph hold. */
230 if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
231 cam_periph_unlock(periph);
232 cam_periph_release(periph);
236 unit = periph->unit_number;
237 softc = (struct ada_softc *)periph->softc;
238 softc->flags |= ADA_FLAG_OPEN;
240 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
241 ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
244 if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
245 /* Invalidate our pack information. */
246 softc->flags &= ~ADA_FLAG_PACK_INVALID;
249 cam_periph_unhold(periph);
250 cam_periph_unlock(periph);
/*
 * adaclose -- GEOM disk close method.  If the drive advertises FLUSH
 * CACHE support, issue a synchronous cache flush (48-bit variant when
 * available) before dropping the open flag and releasing the periph.
 * NOTE(review): extraction gaps — ccb declaration and some braces missing.
 */
255 adaclose(struct disk *dp)
257 struct cam_periph *periph;
258 struct ada_softc *softc;
262 periph = (struct cam_periph *)dp->d_drv1;
266 cam_periph_lock(periph);
267 if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
268 cam_periph_unlock(periph);
269 cam_periph_release(periph);
273 softc = (struct ada_softc *)periph->softc;
274 /* We only sync the cache if the drive is capable of it. */
275 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
277 ccb = cam_periph_getccb(periph, /*priority*/1);
278 cam_fill_ataio(&ccb->ataio,
285 ada_default_timeout*1000);
/* Prefer FLUSH CACHE EXT on 48-bit-capable drives. */
287 if (softc->flags & ADA_FLAG_CAN_48BIT)
288 ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
290 ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
291 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
292 /*sense_flags*/SF_RETRY_UA,
293 softc->disk->d_devstat);
295 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
296 xpt_print(periph->path, "Synchronize cache failed\n");
/* Thaw the device queue if the failed flush froze it. */
298 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
299 cam_release_devq(ccb->ccb_h.path,
304 xpt_release_ccb(ccb);
307 softc->flags &= ~ADA_FLAG_OPEN;
308 cam_periph_unhold(periph);
309 cam_periph_unlock(periph);
310 cam_periph_release(periph);
315 * Actually translate the requested transfer into one the physical driver
316 * can understand. The transfer is described by a buf and will include
317 * only one physical transfer.
320 adastrategy(struct bio *bp)
322 struct cam_periph *periph;
323 struct ada_softc *softc;
325 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
326 if (periph == NULL) {
327 biofinish(bp, NULL, ENXIO);
330 softc = (struct ada_softc *)periph->softc;
332 cam_periph_lock(periph);
336 * check it's not too big a transfer for our adapter
/*
 * NOTE(review): scsi_minphys()/sd_switch look like SCSI-driver leftovers
 * in this ATA driver — verify against the full source whether this line
 * is live code or inside an #if 0 region not visible in this extraction.
 */
338 scsi_minphys(bp,&sd_switch);
342 * Mask interrupts so that the pack cannot be invalidated until
343 * after we are in the queue. Otherwise, we might not properly
344 * clean up one of the buffers.
348 * If the device has been made invalid, error out
350 if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
351 cam_periph_unlock(periph);
352 biofinish(bp, NULL, ENXIO);
357 * Place it in the queue of disk activities for this disk
359 bioq_disksort(&softc->bio_queue, bp);
362 * Schedule ourselves for performing the work.
/* adastart() will be called to drain the bio queue. */
364 xpt_schedule(periph, /* XXX priority */1);
365 cam_periph_unlock(periph);
/*
 * adadump -- kernel crash-dump method.  Writes `length` bytes from
 * `virtual` to the given media offset using polled (non-interrupt) CCBs,
 * then flushes the drive cache.  Runs in panic context, so all I/O goes
 * through xpt_polled_action().
 * NOTE(review): extraction gaps — ccb declaration, periph lookup, and the
 * length==0 flush-only path are not fully visible here.
 */
371 adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
373 struct cam_periph *periph;
374 struct ada_softc *softc;
385 softc = (struct ada_softc *)periph->softc;
386 cam_periph_lock(periph);
387 secsize = softc->params.secsize;
388 lba = offset / secsize;
389 count = length / secsize;
391 if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
392 cam_periph_unlock(periph);
/* Mark the periph polled so completion handling does not expect interrupts. */
397 periph->flags |= CAM_PERIPH_POLLED;
398 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
399 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
400 cam_fill_ataio(&ccb.ataio,
405 (u_int8_t *) virtual,
407 ada_default_timeout*1000);
/* Use 48-bit WRITE DMA EXT when the target range exceeds 28-bit LBA. */
408 if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
409 (lba + count >= ATA_MAX_28BIT_LBA ||
411 ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
414 ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
417 xpt_polled_action(&ccb);
419 if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
420 printf("Aborting dump due to I/O error.\n");
421 cam_periph_unlock(periph);
424 cam_periph_unlock(periph);
/* Final pass: flush the write cache so the dump is durable. */
428 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
429 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
431 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
432 cam_fill_ataio(&ccb.ataio,
439 ada_default_timeout*1000);
441 if (softc->flags & ADA_FLAG_CAN_48BIT)
442 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
444 ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
445 xpt_polled_action(&ccb);
447 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
448 xpt_print(periph->path, "Synchronize cache failed\n");
450 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
451 cam_release_devq(ccb.ccb_h.path,
457 periph->flags &= ~CAM_PERIPH_POLLED;
458 cam_periph_unlock(periph);
/*
 * Interior of adainit (periph_init method; the function header is not
 * visible in this extraction).  Registers the driver-wide AC_FOUND_DEVICE
 * async callback and, when ordered tags are enabled, a shutdown hook that
 * flushes drive caches (adashutdown).
 */
468 * Install a global async callback. This callback will
469 * receive async callbacks like "new device found".
471 status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
473 if (status != CAM_REQ_CMP) {
474 printf("ada: Failed to attach master async callback "
475 "due to status 0x%x!\n", status);
476 } else if (ada_send_ordered) {
478 /* Register our shutdown event handler */
479 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
480 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
481 printf("adainit: shutdown event registration failed!\n");
/*
 * adaoninvalidate -- periph invalidation method.  Called when the device
 * goes away: deregisters async callbacks, marks the pack invalid, fails
 * all queued bios with ENXIO, and tells GEOM the disk is gone.
 */
486 adaoninvalidate(struct cam_periph *periph)
488 struct ada_softc *softc;
490 softc = (struct ada_softc *)periph->softc;
493 * De-register any async callbacks.
/* Passing code 0 removes this periph's async registration. */
495 xpt_register_async(0, adaasync, periph, periph->path);
497 softc->flags |= ADA_FLAG_PACK_INVALID;
500 * Return all queued I/O with ENXIO.
501 * XXX Handle any transactions queued to the card
502 * with XPT_ABORT_CCB.
504 bioq_flush(&softc->bio_queue, NULL, ENXIO);
506 disk_gone(softc->disk);
507 xpt_print(periph->path, "lost device\n");
/*
 * adacleanup -- periph destructor.  Tears down the sysctl context (if it
 * was initialized), destroys the GEOM disk, drains the ordered-tag
 * callout, and frees the softc.  Drops the periph lock around the
 * teardown since disk_destroy/callout_drain may sleep.
 */
511 adacleanup(struct cam_periph *periph)
513 struct ada_softc *softc;
515 softc = (struct ada_softc *)periph->softc;
517 xpt_print(periph->path, "removing device entry\n");
518 cam_periph_unlock(periph);
521 * If we can't free the sysctl tree, oh well...
523 if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
524 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
525 xpt_print(periph->path, "can't remove sysctl context\n");
528 disk_destroy(softc->disk);
529 callout_drain(&softc->sendordered_c);
/* NOTE(review): freed with M_DEVBUF though allocated likewise — consistent. */
530 free(softc, M_DEVBUF);
531 cam_periph_lock(periph);
/*
 * adaasync -- async event callback.  AC_FOUND_DEVICE allocates a new ada
 * periph for an ATA-protocol device; the reset/BDR case (header not
 * visible in this extraction) flags in-flight CCBs to retry the expected
 * unit attention.  Everything else falls through to cam_periph_async().
 */
535 adaasync(void *callback_arg, u_int32_t code,
536 struct cam_path *path, void *arg)
538 struct cam_periph *periph;
540 periph = (struct cam_periph *)callback_arg;
542 case AC_FOUND_DEVICE:
544 struct ccb_getdev *cgd;
547 cgd = (struct ccb_getdev *)arg;
/* Only attach to ATA-protocol devices; SCSI types go to da(4) etc. */
551 if (cgd->protocol != PROTO_ATA)
554 // if (SID_TYPE(&cgd->inq_data) != T_DIRECT
555 // && SID_TYPE(&cgd->inq_data) != T_RBC
556 // && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
560 * Allocate a peripheral instance for
561 * this device and start the probe
564 status = cam_periph_alloc(adaregister, adaoninvalidate,
565 adacleanup, adastart,
566 "ada", CAM_PERIPH_BIO,
567 cgd->ccb_h.path, adaasync,
568 AC_FOUND_DEVICE, cgd);
/* CAM_REQ_INPROG is not an error: registration continues asynchronously. */
570 if (status != CAM_REQ_CMP
571 && status != CAM_REQ_INPROG)
572 printf("adaasync: Unable to attach to new device "
573 "due to status 0x%x\n", status);
579 struct ada_softc *softc;
580 struct ccb_hdr *ccbh;
582 softc = (struct ada_softc *)periph->softc;
584 * Don't fail on the expected unit attention
/* Mark softc and every pending CCB so adaerror retries the UA. */
587 softc->flags |= ADA_FLAG_RETRY_UA;
588 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
589 ccbh->ccb_state |= ADA_CCB_RETRY_UA;
593 cam_periph_async(periph, code, path, arg);
/*
 * adasysctlinit -- deferred (taskqueue) sysctl tree creation for one unit,
 * rooted at kern.cam.ada.<unit>.  Takes a periph reference for the
 * duration and releases it on every exit path.
 */
599 adasysctlinit(void *context, int pending)
601 struct cam_periph *periph;
602 struct ada_softc *softc;
603 char tmpstr[80], tmpstr2[80];
605 periph = (struct cam_periph *)context;
606 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
609 softc = (struct ada_softc *)periph->softc;
610 snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
611 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
613 sysctl_ctx_init(&softc->sysctl_ctx);
/* Remember the context is live so adacleanup knows to free it. */
614 softc->flags |= ADA_FLAG_SCTX_INIT;
615 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
616 SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
617 CTLFLAG_RD, 0, tmpstr);
618 if (softc->sysctl_tree == NULL) {
619 printf("adasysctlinit: unable to allocate sysctl tree\n");
620 cam_periph_release(periph);
624 cam_periph_release(periph);
/*
 * adaregister -- periph constructor.  Allocates and fills the softc from
 * the drive's IDENTIFY data (48-bit, flush, NCQ capabilities), queries
 * the SIM, creates the GEOM disk, announces the device, registers async
 * callbacks, and starts the periodic ordered-tag callout.
 * NOTE(review): extraction gaps — several braces/returns are missing;
 * comments below annotate only what is visible.
 */
628 adaregister(struct cam_periph *periph, void *arg)
630 struct ada_softc *softc;
631 struct ccb_pathinq cpi;
632 struct ccb_getdev *cgd;
633 char announce_buf[80];
634 struct disk_params *dp;
638 cgd = (struct ccb_getdev *)arg;
639 if (periph == NULL) {
640 printf("adaregister: periph was NULL!!\n");
641 return(CAM_REQ_CMP_ERR);
645 printf("adaregister: no getdev CCB, can't register device\n");
646 return(CAM_REQ_CMP_ERR);
649 softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
653 printf("adaregister: Unable to probe new device. "
654 "Unable to allocate softc\n");
655 return(CAM_REQ_CMP_ERR);
658 LIST_INIT(&softc->pending_ccbs);
659 softc->state = ADA_STATE_NORMAL;
660 bioq_init(&softc->bio_queue);
/* Capability bits straight from ATA IDENTIFY data. */
662 if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
663 softc->flags |= ADA_FLAG_CAN_48BIT;
664 if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
665 softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
/* NCQ requires SATA NCQ capability and a full 32-deep queue (field is depth-1). */
666 if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
667 cgd->ident_data.queue >= 31)
668 softc->flags |= ADA_FLAG_CAN_NCQ;
669 // if ((cgd->inq_data.flags & SID_CmdQue) != 0)
670 // softc->flags |= ADA_FLAG_TAGGED_QUEUING;
672 periph->softc = softc;
675 * See if this device has any quirks.
677 // match = cam_quirkmatch((caddr_t)&cgd->inq_data,
678 // (caddr_t)ada_quirk_table,
679 // sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
680 // sizeof(*ada_quirk_table), scsi_inquiry_match);
684 softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
686 softc->quirks = ADA_Q_NONE;
688 /* Check if the SIM does not want queued commands */
689 bzero(&cpi, sizeof(cpi));
690 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
691 cpi.ccb_h.func_code = XPT_PATH_INQ;
692 xpt_action((union ccb *)&cpi);
/* Drop NCQ if the path inquiry failed or the HBA cannot tag. */
693 if (cpi.ccb_h.status != CAM_REQ_CMP ||
694 (cpi.hba_inquiry & PI_TAG_ABLE) == 0)
695 softc->flags &= ~ADA_FLAG_CAN_NCQ;
/* Sysctl setup is deferred to a taskqueue (see adasysctlinit). */
697 TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
700 * Register this media as a disk
/* disk_create may sleep, so drop the SIM mutex around GEOM setup. */
702 mtx_unlock(periph->sim->mtx);
703 softc->disk = disk_alloc();
704 softc->disk->d_open = adaopen;
705 softc->disk->d_close = adaclose;
706 softc->disk->d_strategy = adastrategy;
707 softc->disk->d_dump = adadump;
708 softc->disk->d_name = "ada";
709 softc->disk->d_drv1 = periph;
710 maxio = cpi.maxio; /* Honor max I/O size of SIM */
712 maxio = DFLTPHYS; /* traditional default */
713 else if (maxio > MAXPHYS)
714 maxio = MAXPHYS; /* for safety */
/* Per-command sector-count ceiling: 16 bits for 48-bit, 8 bits for 28-bit. */
715 if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
716 maxio = min(maxio, 65535 * 512);
717 else /* 28bit ATA command limit */
718 maxio = min(maxio, 255 * 512);
719 softc->disk->d_maxsize = maxio;
720 softc->disk->d_unit = periph->unit_number;
721 softc->disk->d_flags = 0;
722 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
723 softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
724 strlcpy(softc->disk->d_ident, cgd->serial_num,
725 MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));
727 adasetgeom(periph, cgd);
728 softc->disk->d_sectorsize = softc->params.secsize;
729 softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
730 /* XXX: these are not actually "firmware" values, so they may be wrong */
731 softc->disk->d_fwsectors = softc->params.secs_per_track;
732 softc->disk->d_fwheads = softc->params.heads;
733 // softc->disk->d_devstat->block_size = softc->params.secsize;
734 // softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
736 disk_create(softc->disk, DISK_VERSION);
737 mtx_lock(periph->sim->mtx);
740 snprintf(announce_buf, sizeof(announce_buf),
741 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
742 (uintmax_t)(((uintmax_t)dp->secsize *
743 dp->sectors) / (1024*1024)),
744 (uintmax_t)dp->sectors,
745 dp->secsize, dp->heads,
746 dp->secs_per_track, dp->cylinders);
747 xpt_announce_periph(periph, announce_buf);
748 if (softc->flags & ADA_FLAG_CAN_NCQ) {
749 printf("%s%d: Native Command Queueing enabled\n",
750 periph->periph_name, periph->unit_number);
754 * Add async callbacks for bus reset and
755 * bus device reset calls. I don't bother
756 * checking if this fails as, in most cases,
757 * the system will function just fine without
758 * them and the only alternative would be to
759 * not attach the device on failure.
761 xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
762 adaasync, periph, periph->path);
765 * Take an exclusive refcount on the periph while adastart is called
766 * to finish the probe. The reference will be dropped in adadone at
769 // (void)cam_periph_hold(periph, PRIBIO);
770 // xpt_schedule(periph, /*priority*/5);
773 * Schedule a periodic event to occasionally send an
774 * ordered tag to a device.
776 callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
777 callout_reset(&softc->sendordered_c,
778 (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
779 adasendorderedtag, softc);
/*
 * adastart -- periph start routine.  Runs with the SIM lock held when a
 * CCB becomes available: services immediate-CCB waiters first, otherwise
 * pulls the next bio off the queue and translates it into an ATA command
 * (NCQ, 48-bit DMA, or 28-bit DMA for read/write; FLUSH CACHE for
 * BIO_FLUSH), then dispatches via xpt_action().
 * NOTE(review): extraction gaps — some case labels, braces, and argument
 * lists (e.g. cam_fill_ataio bodies) are not visible here.
 */
785 adastart(struct cam_periph *periph, union ccb *start_ccb)
787 struct ada_softc *softc;
789 softc = (struct ada_softc *)periph->softc;
791 switch (softc->state) {
792 case ADA_STATE_NORMAL:
794 /* Pull a buffer from the queue and get going on it */
798 * See if there is a buf with work for us to do..
800 bp = bioq_first(&softc->bio_queue);
/* An immediate-priority waiter (e.g. probe) gets the CCB first. */
801 if (periph->immediate_priority <= periph->pinfo.priority) {
802 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
803 ("queuing for immediate ccb\n"));
804 start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
805 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
807 periph->immediate_priority = CAM_PRIORITY_NONE;
808 wakeup(&periph->ccb_list);
809 } else if (bp == NULL) {
810 xpt_release_ccb(start_ccb);
812 struct ccb_ataio *ataio = &start_ccb->ataio;
815 bioq_remove(&softc->bio_queue, bp);
/* Ordered-tag starvation avoidance; tag codes are SCSI leftovers (zeroed). */
817 if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
818 softc->flags &= ~ADA_FLAG_NEED_OTAG;
819 softc->ordered_tag_count++;
820 tag_code = 0;//MSG_ORDERED_Q_TAG;
822 tag_code = 0;//MSG_SIMPLE_Q_TAG;
824 switch (bp->bio_cmd) {
828 uint64_t lba = bp->bio_pblkno;
/* NOTE(review): 16-bit count — relies on d_maxsize capping set in adaregister. */
829 uint16_t count = bp->bio_bcount / softc->params.secsize;
831 cam_fill_ataio(ataio,
834 bp->bio_cmd == BIO_READ ?
835 CAM_DIR_IN : CAM_DIR_OUT,
839 ada_default_timeout*1000);
/* Command selection: NCQ > 48-bit DMA (when LBA/count require) > 28-bit DMA. */
841 if (softc->flags & ADA_FLAG_CAN_NCQ) {
842 if (bp->bio_cmd == BIO_READ) {
843 ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
846 ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
849 } else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
850 (lba + count >= ATA_MAX_28BIT_LBA ||
852 if (bp->bio_cmd == BIO_READ) {
853 ata_48bit_cmd(ataio, ATA_READ_DMA48,
856 ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
860 if (bp->bio_cmd == BIO_READ) {
861 ata_28bit_cmd(ataio, ATA_READ_DMA,
864 ata_28bit_cmd(ataio, ATA_WRITE_DMA,
/* Presumably the BIO_FLUSH case — header not visible; confirm in full source. */
871 cam_fill_ataio(ataio,
878 ada_default_timeout*1000);
880 if (softc->flags & ADA_FLAG_CAN_48BIT)
881 ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
883 ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
886 start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
889 * Block out any asyncronous callbacks
890 * while we touch the pending ccb list.
892 LIST_INSERT_HEAD(&softc->pending_ccbs,
893 &start_ccb->ccb_h, periph_links.le);
894 softc->outstanding_cmds++;
896 /* We expect a unit attention from this device */
897 if ((softc->flags & ADA_FLAG_RETRY_UA) != 0) {
898 start_ccb->ccb_h.ccb_state |= ADA_CCB_RETRY_UA;
899 softc->flags &= ~ADA_FLAG_RETRY_UA;
/* Stash the bio on the CCB so adadone can complete it. */
902 start_ccb->ccb_h.ccb_bp = bp;
903 bp = bioq_first(&softc->bio_queue);
905 xpt_action(start_ccb);
909 /* Have more work to do, so ensure we stay scheduled */
910 xpt_schedule(periph, /* XXX priority */1);
/*
 * adadone -- CCB completion handler.  For buffer I/O: run error recovery
 * (possibly rescheduling via ERESTART), invalidate the pack on ENXIO,
 * propagate residuals/errors into the bio, unlink the CCB from the
 * pending list, and complete the bio.  ADA_CCB_WAITING wakes the sleeper;
 * dump CCBs are polled and need no action.
 * NOTE(review): extraction gaps — biodone/biofinish calls and some braces
 * are not visible here.
 */
918 adadone(struct cam_periph *periph, union ccb *done_ccb)
920 struct ada_softc *softc;
921 struct ccb_ataio *ataio;
923 softc = (struct ada_softc *)periph->softc;
924 ataio = &done_ccb->ataio;
925 switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
926 case ADA_CCB_BUFFER_IO:
930 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
931 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
934 error = adaerror(done_ccb, CAM_RETRY_SELTO, 0);
935 if (error == ERESTART) {
937 * A retry was scheuled, so
944 if (error == ENXIO) {
946 * Catastrophic error. Mark our pack as
950 * XXX See if this is really a media
953 xpt_print(periph->path,
954 "Invalidating pack\n");
955 softc->flags |= ADA_FLAG_PACK_INVALID;
959 * return all queued I/O with EIO, so that
960 * the client can retry these I/Os in the
961 * proper order should it attempt to recover.
963 bioq_flush(&softc->bio_queue, NULL, EIO);
964 bp->bio_error = error;
965 bp->bio_resid = bp->bio_bcount;
966 bp->bio_flags |= BIO_ERROR;
/* Thaw the device queue if error processing left it frozen. */
968 bp->bio_resid = ataio->resid;
970 if (bp->bio_resid != 0)
971 bp->bio_flags |= BIO_ERROR;
973 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
974 cam_release_devq(done_ccb->ccb_h.path,
/* A successful CCB must never come back with the queue frozen. */
980 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
981 panic("REQ_CMP with QFRZN");
982 bp->bio_resid = ataio->resid;
/* A short (residual) transfer on success is still reported as an error. */
983 if (ataio->resid > 0)
984 bp->bio_flags |= BIO_ERROR;
988 * Block out any asyncronous callbacks
989 * while we touch the pending ccb list.
991 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
992 softc->outstanding_cmds--;
993 if (softc->outstanding_cmds == 0)
994 softc->flags |= ADA_FLAG_WENT_IDLE;
999 case ADA_CCB_WAITING:
1001 /* Caller will release the CCB */
1002 wakeup(&done_ccb->ccb_h.cbfcnp);
1006 /* No-op. We're polling */
1011 xpt_release_ccb(done_ccb);
/*
 * adaerror -- thin wrapper around the generic CAM periph error handler;
 * supplies this unit's saved CCB for recovery bookkeeping.
 */
1015 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1017 struct ada_softc *softc;
1018 struct cam_periph *periph;
1020 periph = xpt_path_periph(ccb->ccb_h.path);
1021 softc = (struct ada_softc *)periph->softc;
1023 return(cam_periph_error(ccb, cam_flags, sense_flags,
1024 &softc->saved_ccb));
/*
 * adasetgeom -- derive disk geometry/capacity from ATA IDENTIFY data.
 * Preference order for total sector count: current CHS values (if the
 * ATA_FLAG_54_58 words are valid), then 28-bit LBA size, then 48-bit
 * LBA size when the drive supports 48-bit addressing and the value
 * exceeds the 28-bit limit.
 */
1028 adasetgeom(struct cam_periph *periph, struct ccb_getdev *cgd)
1030 struct ada_softc *softc = (struct ada_softc *)periph->softc;
1031 struct disk_params *dp = &softc->params;
1032 u_int64_t lbasize48;
/* Current translated geometry, valid only when words 54-58 are flagged. */
1036 if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1037 cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1038 dp->heads = cgd->ident_data.current_heads;
1039 dp->secs_per_track = cgd->ident_data.current_sectors;
1040 dp->cylinders = cgd->ident_data.cylinders;
1041 dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1042 ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1044 dp->heads = cgd->ident_data.heads;
1045 dp->secs_per_track = cgd->ident_data.sectors;
1046 dp->cylinders = cgd->ident_data.cylinders;
1047 dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
/* 28-bit LBA capacity from IDENTIFY words 60-61. */
1049 lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1050 ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1052 /* does this device need oldstyle CHS addressing */
1053 // if (!ad_version(cgd->ident_data.version_major) || !lbasize)
1054 // atadev->flags |= ATA_D_USE_CHS;
1056 /* use the 28bit LBA size if valid or bigger than the CHS mapping */
/* 16383 cylinders is the ATA convention signalling "capacity is in LBA". */
1057 if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
1058 dp->sectors = lbasize;
1060 /* use the 48bit LBA size if valid */
1061 lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1062 ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1063 ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1064 ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1065 if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1066 lbasize48 > ATA_MAX_28BIT_LBA)
1067 dp->sectors = lbasize48;
/*
 * adasendorderedtag -- periodic callout (every timeout/4; see
 * ADA_ORDEREDTAG_INTERVAL) that requests an ordered tag for the next
 * command when no ordered tag went out last interval and the device has
 * not gone idle, preventing simple-tag starvation.  Reschedules itself.
 */
1071 adasendorderedtag(void *arg)
1073 struct ada_softc *softc = arg;
1075 if (ada_send_ordered) {
1076 if ((softc->ordered_tag_count == 0)
1077 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1078 softc->flags |= ADA_FLAG_NEED_OTAG;
1080 if (softc->outstanding_cmds > 0)
1081 softc->flags &= ~ADA_FLAG_WENT_IDLE;
/* Reset the per-interval ordered-tag counter. */
1083 softc->ordered_tag_count = 0;
1085 /* Queue us up again */
1086 callout_reset(&softc->sendordered_c,
1087 (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
1088 adasendorderedtag, softc);
1092 * Step through all ADA peripheral drivers, and if the device is still open,
1093 * sync the disk cache to physical media.
1096 adashutdown(void * arg, int howto)
1098 struct cam_periph *periph;
1099 struct ada_softc *softc;
/* Walk every registered ada unit; polled CCBs since we are shutting down. */
1101 TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1104 cam_periph_lock(periph);
1105 softc = (struct ada_softc *)periph->softc;
1107 * We only sync the cache if the drive is still open, and
1108 * if the drive is capable of it..
1110 if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
1111 (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
1112 cam_periph_unlock(periph);
1116 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1118 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1119 cam_fill_ataio(&ccb.ataio,
1126 ada_default_timeout*1000);
1128 if (softc->flags & ADA_FLAG_CAN_48BIT)
1129 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1131 ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
1132 xpt_polled_action(&ccb);
1134 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1135 xpt_print(periph->path, "Synchronize cache failed\n");
/* Thaw the queue if the failed flush froze it, then move to the next unit. */
1137 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1138 cam_release_devq(ccb.ccb_h.path,
1142 /*getcount_only*/0);
1143 cam_periph_unlock(periph);
1147 #endif /* _KERNEL */