2 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
36 #include <sys/sysctl.h>
37 #include <sys/taskqueue.h>
39 #include <sys/mutex.h>
41 #include <sys/devicestat.h>
42 #include <sys/eventhandler.h>
43 #include <sys/malloc.h>
45 #include <sys/reboot.h>
46 #include <geom/geom_disk.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_xpt_periph.h>
58 #include <cam/cam_sim.h>
60 #include <cam/ata/ata_all.h>
62 #include <machine/md_var.h> /* geometry translation */
/*
 * Driver-wide constants, per-device flag bits, and fragments of the
 * per-device softc.  NOTE(review): this extraction is elided — the enum
 * and struct framing lines (braces, tag names) are not visible here.
 */
66 #define ATA_MAX_28BIT_LBA 268435455UL
/* Per-device capability/state flags (stored in softc->flags). */
73 ADA_FLAG_PACK_INVALID = 0x001,
74 ADA_FLAG_CAN_48BIT = 0x002,
75 ADA_FLAG_CAN_FLUSHCACHE = 0x004,
76 ADA_FLAG_CAN_NCQ = 0x008,
77 ADA_FLAG_CAN_DMA = 0x010,
78 ADA_FLAG_NEED_OTAG = 0x020,
79 ADA_FLAG_WENT_IDLE = 0x040,
80 ADA_FLAG_CAN_TRIM = 0x080,
81 ADA_FLAG_OPEN = 0x100,
82 ADA_FLAG_SCTX_INIT = 0x200,
83 ADA_FLAG_CAN_CFA = 0x400,
84 ADA_FLAG_CAN_POWERMGT = 0x800
/* CCB state values stored in ccb_h.ccb_state; masked with ADA_CCB_TYPE_MASK. */
92 ADA_CCB_BUFFER_IO = 0x03,
93 ADA_CCB_WAITING = 0x04,
96 ADA_CCB_TYPE_MASK = 0x0F,
99 /* Offsets into our private area for storing information */
100 #define ccb_state ppriv_field0
101 #define ccb_bp ppriv_ptr1
/* Fragment of struct disk_params (geometry reported to GEOM). */
105 u_int8_t secs_per_track;
107 u_int32_t secsize; /* Number of bytes/logical sector */
108 u_int64_t sectors; /* Total number sectors */
/* One DSM TRIM payload block is 512 bytes = 64 eight-byte LBA/count ranges. */
111 #define TRIM_MAX_BLOCKS 4
112 #define TRIM_MAX_RANGES TRIM_MAX_BLOCKS * 64
113 struct trim_request {
114 uint8_t data[TRIM_MAX_RANGES * 8];
115 struct bio *bps[TRIM_MAX_RANGES]; /* bios coalesced into this request */
/* Fragment of struct ada_softc (per-device state, protected by the SIM lock). */
119 struct bio_queue_head bio_queue;
120 struct bio_queue_head trim_queue;
124 int ordered_tag_count;
125 int outstanding_cmds;
128 struct disk_params params;
130 struct task sysctl_task;
131 struct sysctl_ctx_list sysctl_ctx;
132 struct sysctl_oid *sysctl_tree;
133 struct callout sendordered_c;
134 struct trim_request trim_req;
/*
 * Quirk matching: entries are matched against the drive's IDENT data by
 * cam_quirkmatch() in adaregister().  The visible entry is the wildcard
 * catch-all (matches any vendor/product/revision).
 */
137 struct ada_quirk_entry {
138 struct scsi_inquiry_pattern inq_pat;
142 static struct ada_quirk_entry ada_quirk_table[] =
147 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
148 /*vendor*/"*", /*product*/"*", /*revision*/"*"
/*
 * Forward declarations for the periph driver entry points and helpers.
 * The typedef'd forms (periph_init_t etc.) come from cam_periph.h.
 */
154 static disk_strategy_t adastrategy;
155 static dumper_t adadump;
156 static periph_init_t adainit;
157 static void adaasync(void *callback_arg, u_int32_t code,
158 struct cam_path *path, void *arg);
159 static void adasysctlinit(void *context, int pending);
160 static periph_ctor_t adaregister;
161 static periph_dtor_t adacleanup;
162 static periph_start_t adastart;
163 static periph_oninv_t adaoninvalidate;
164 static void adadone(struct cam_periph *periph,
165 union ccb *done_ccb);
166 static int adaerror(union ccb *ccb, u_int32_t cam_flags,
167 u_int32_t sense_flags);
168 static void adagetparams(struct cam_periph *periph,
169 struct ccb_getdev *cgd);
170 static timeout_t adasendorderedtag;
171 static void adashutdown(void *arg, int howto);
/*
 * Compile-time defaults, overridable via kernel config, plus the matching
 * loader tunables and run-time sysctls under kern.cam.ada.
 */
173 #ifndef ADA_DEFAULT_TIMEOUT
174 #define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */
177 #ifndef ADA_DEFAULT_RETRY
178 #define ADA_DEFAULT_RETRY 4
181 #ifndef ADA_DEFAULT_SEND_ORDERED
182 #define ADA_DEFAULT_SEND_ORDERED 1
185 #ifndef ADA_DEFAULT_SPINDOWN_SHUTDOWN
186 #define ADA_DEFAULT_SPINDOWN_SHUTDOWN 1
190 * Most platforms map firmware geometry to actual, but some don't. If
191 * not overridden, default to nothing.
193 #ifndef ata_disk_firmware_geom_adjust
194 #define ata_disk_firmware_geom_adjust(disk)
197 static int ada_retry_count = ADA_DEFAULT_RETRY;
198 static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
199 static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
200 static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
202 SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
203 "CAM Direct Access Disk driver");
204 SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
205 &ada_retry_count, 0, "Normal I/O retry count");
206 TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
207 SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
208 &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
209 TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
210 SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
211 &ada_send_ordered, 0, "Send Ordered Tags");
212 TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);
213 SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
214 &ada_spindown_shutdown, 0, "Spin down upon shutdown");
215 TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
218 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
219 * to the default timeout, we check to see whether an ordered
220 * tagged transaction is appropriate to prevent simple tag
221 * starvation. Since we'd like to ensure that there is at least
222 * 1/2 of the timeout length left for a starved transaction to
223 * complete after we've sent an ordered tag, we must poll at least
224 * four times in every timeout period. This takes care of the worst
225 * case where a starved transaction starts during an interval that
226 * meets the requirement "don't send an ordered tag" test so it takes
227 * us two intervals to determine that a tag must be sent.
229 #ifndef ADA_ORDEREDTAG_INTERVAL
230 #define ADA_ORDEREDTAG_INTERVAL 4
/* Registration of this periph driver with the CAM layer. */
233 static struct periph_driver adadriver =
236 TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
239 PERIPHDRIVER_DECLARE(ada, adadriver);
241 MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
/*
 * GEOM d_open handler: take a reference on the periph, hold it while we
 * mark the device open and clear any stale pack-invalid state.
 * NOTE(review): several lines (error paths, returns) are elided in this view.
 */
244 adaopen(struct disk *dp)
246 struct cam_periph *periph;
247 struct ada_softc *softc;
251 periph = (struct cam_periph *)dp->d_drv1;
252 if (periph == NULL) {
/* Acquire a reference so the periph can't go away while open. */
256 if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
260 cam_periph_lock(periph);
261 if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
262 cam_periph_unlock(periph);
263 cam_periph_release(periph);
267 unit = periph->unit_number;
268 softc = (struct ada_softc *)periph->softc;
269 softc->flags |= ADA_FLAG_OPEN;
271 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
272 ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
275 if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
276 /* Invalidate our pack information. */
277 softc->flags &= ~ADA_FLAG_PACK_INVALID;
280 cam_periph_unhold(periph);
281 cam_periph_unlock(periph);
/*
 * GEOM d_close handler: on last close, synchronously flush the drive's
 * write cache (if the drive supports it), then drop the OPEN flag and
 * release the reference taken in adaopen().
 */
286 adaclose(struct disk *dp)
288 struct cam_periph *periph;
289 struct ada_softc *softc;
293 periph = (struct cam_periph *)dp->d_drv1;
297 cam_periph_lock(periph);
298 if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
299 cam_periph_unlock(periph);
300 cam_periph_release(periph);
304 softc = (struct ada_softc *)periph->softc;
305 /* We only sync the cache if the drive is capable of it. */
306 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
308 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
309 cam_fill_ataio(&ccb->ataio,
316 ada_default_timeout*1000);
/* 48-bit capable drives take the 48-bit flavor of FLUSH CACHE. */
318 if (softc->flags & ADA_FLAG_CAN_48BIT)
319 ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
321 ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
322 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
323 /*sense_flags*/0, softc->disk->d_devstat);
325 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
326 xpt_print(periph->path, "Synchronize cache failed\n");
/* Release any device queue freeze left by the failed command. */
328 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
329 cam_release_devq(ccb->ccb_h.path,
334 xpt_release_ccb(ccb);
337 softc->flags &= ~ADA_FLAG_OPEN;
338 cam_periph_unhold(periph);
339 cam_periph_unlock(periph);
340 cam_periph_release(periph);
/*
 * Request another pass of adastart() if there is queued regular I/O, or
 * queued TRIM work and no TRIM is currently in flight.
 */
345 adaschedule(struct cam_periph *periph)
347 struct ada_softc *softc = (struct ada_softc *)periph->softc;
349 if (bioq_first(&softc->bio_queue) ||
350 (!softc->trim_running && bioq_first(&softc->trim_queue))) {
351 /* Have more work to do, so ensure we stay scheduled */
352 xpt_schedule(periph, CAM_PRIORITY_NORMAL);
357 * Actually translate the requested transfer into one the physical driver
358 * can understand. The transfer is described by a buf and will include
359 * only one physical transfer.
/*
 * GEOM d_strategy handler: queue the bio on the appropriate per-device
 * queue (trim_queue for BIO_DELETE when the drive supports TRIM, else
 * bio_queue) and schedule adastart() to issue it.
 */
362 adastrategy(struct bio *bp)
364 struct cam_periph *periph;
365 struct ada_softc *softc;
367 periph = (struct cam_periph *)bp->bio_disk->d_drv1;
368 if (periph == NULL) {
369 biofinish(bp, NULL, ENXIO);
372 softc = (struct ada_softc *)periph->softc;
374 cam_periph_lock(periph);
377 * If the device has been made invalid, error out
379 if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
380 cam_periph_unlock(periph);
381 biofinish(bp, NULL, ENXIO);
386 * Place it in the queue of disk activities for this disk
388 if (bp->bio_cmd == BIO_DELETE &&
389 (softc->flags & ADA_FLAG_CAN_TRIM))
390 bioq_disksort(&softc->trim_queue, bp);
392 bioq_disksort(&softc->bio_queue, bp);
395 * Schedule ourselves for performing the work.
398 cam_periph_unlock(periph);
/*
 * Kernel crash-dump handler: write `length` bytes at `offset` to the disk
 * using polled (non-interrupt) CCBs, then flush the cache on the final
 * zero-length call.  Runs in panic context, so everything is synchronous.
 */
404 adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
406 struct cam_periph *periph;
407 struct ada_softc *softc;
418 softc = (struct ada_softc *)periph->softc;
419 cam_periph_lock(periph);
420 secsize = softc->params.secsize;
421 lba = offset / secsize;
422 count = length / secsize;
424 if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
425 cam_periph_unlock(periph);
430 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
431 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
432 cam_fill_ataio(&ccb.ataio,
437 (u_int8_t *) virtual,
439 ada_default_timeout*1000);
/* Use 48-bit WRITE DMA when the range exceeds 28-bit addressing. */
440 if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
441 (lba + count >= ATA_MAX_28BIT_LBA ||
443 ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
446 ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
449 xpt_polled_action(&ccb);
451 if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
452 printf("Aborting dump due to I/O error.\n");
453 cam_periph_unlock(periph);
456 cam_periph_unlock(periph);
/* Final call (length == 0): flush the write cache so the dump is durable. */
460 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
461 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
463 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
464 cam_fill_ataio(&ccb.ataio,
471 ada_default_timeout*1000);
473 if (softc->flags & ADA_FLAG_CAN_48BIT)
474 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
476 ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
477 xpt_polled_action(&ccb);
479 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
480 xpt_print(periph->path, "Synchronize cache failed\n");
482 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
483 cam_release_devq(ccb.ccb_h.path,
489 cam_periph_unlock(periph);
/*
 * Body fragment of adainit() (function header elided in this view):
 * register the driver-wide AC_FOUND_DEVICE async callback and, when
 * ordered tags are enabled, the shutdown event handler.
 */
499 * Install a global async callback. This callback will
500 * receive async callbacks like "new device found".
502 status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
504 if (status != CAM_REQ_CMP) {
505 printf("ada: Failed to attach master async callback "
506 "due to status 0x%x!\n", status);
507 } else if (ada_send_ordered) {
509 /* Register our shutdown event handler */
510 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
511 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
512 printf("adainit: shutdown event registration failed!\n");
/*
 * Called when the device is invalidated (e.g. hot unplug): deregister
 * async callbacks, mark the pack invalid, fail all queued I/O with
 * ENXIO, and tell GEOM the disk is gone.
 */
517 adaoninvalidate(struct cam_periph *periph)
519 struct ada_softc *softc;
521 softc = (struct ada_softc *)periph->softc;
524 * De-register any async callbacks.
526 xpt_register_async(0, adaasync, periph, periph->path);
528 softc->flags |= ADA_FLAG_PACK_INVALID;
531 * Return all queued I/O with ENXIO.
532 * XXX Handle any transactions queued to the card
533 * with XPT_ABORT_CCB.
535 bioq_flush(&softc->bio_queue, NULL, ENXIO);
536 bioq_flush(&softc->trim_queue, NULL, ENXIO);
538 disk_gone(softc->disk);
539 xpt_print(periph->path, "lost device\n");
/*
 * Periph destructor: tear down the sysctl tree (if it was created),
 * destroy the GEOM disk, drain the ordered-tag callout, and free the
 * softc.  Drops/retakes the periph lock around the teardown.
 */
543 adacleanup(struct cam_periph *periph)
545 struct ada_softc *softc;
547 softc = (struct ada_softc *)periph->softc;
549 xpt_print(periph->path, "removing device entry\n");
550 cam_periph_unlock(periph);
553 * If we can't free the sysctl tree, oh well...
555 if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
556 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
557 xpt_print(periph->path, "can't remove sysctl context\n");
560 disk_destroy(softc->disk);
561 callout_drain(&softc->sendordered_c);
562 free(softc, M_DEVBUF);
563 cam_periph_lock(periph);
/*
 * Async event callback.  On AC_FOUND_DEVICE for an ATA-protocol device,
 * allocate a new ada periph instance; all other events fall through to
 * the generic cam_periph_async() handler.
 */
567 adaasync(void *callback_arg, u_int32_t code,
568 struct cam_path *path, void *arg)
570 struct cam_periph *periph;
572 periph = (struct cam_periph *)callback_arg;
574 case AC_FOUND_DEVICE:
576 struct ccb_getdev *cgd;
579 cgd = (struct ccb_getdev *)arg;
/* Only claim native ATA devices; SCSI etc. are handled elsewhere. */
583 if (cgd->protocol != PROTO_ATA)
587 * Allocate a peripheral instance for
588 * this device and start the probe
591 status = cam_periph_alloc(adaregister, adaoninvalidate,
592 adacleanup, adastart,
593 "ada", CAM_PERIPH_BIO,
594 cgd->ccb_h.path, adaasync,
595 AC_FOUND_DEVICE, cgd);
597 if (status != CAM_REQ_CMP
598 && status != CAM_REQ_INPROG)
599 printf("adaasync: Unable to attach to new device "
600 "due to status 0x%x\n", status);
604 cam_periph_async(periph, code, path, arg);
/*
 * Deferred (taskqueue) creation of the per-unit sysctl tree under
 * kern.cam.ada.<unit>.  Takes a periph reference for the duration and
 * releases it before returning on every path.
 */
610 adasysctlinit(void *context, int pending)
612 struct cam_periph *periph;
613 struct ada_softc *softc;
614 char tmpstr[80], tmpstr2[80];
616 periph = (struct cam_periph *)context;
617 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
620 softc = (struct ada_softc *)periph->softc;
621 snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
622 snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
624 sysctl_ctx_init(&softc->sysctl_ctx);
625 softc->flags |= ADA_FLAG_SCTX_INIT;
626 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
627 SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
628 CTLFLAG_RD, 0, tmpstr);
629 if (softc->sysctl_tree == NULL) {
630 printf("adasysctlinit: unable to allocate sysctl tree\n");
631 cam_periph_release(periph);
635 cam_periph_release(periph);
/*
 * Periph constructor: called once per discovered ATA disk.  Allocates and
 * fills the softc from the drive's IDENTIFY data, matches quirks, creates
 * the GEOM disk (d_* methods, max I/O size, geometry), announces the
 * device, registers async callbacks, and starts the ordered-tag callout.
 */
639 adaregister(struct cam_periph *periph, void *arg)
641 struct ada_softc *softc;
642 struct ccb_pathinq cpi;
643 struct ccb_getdev *cgd;
644 char announce_buf[80];
645 struct disk_params *dp;
649 cgd = (struct ccb_getdev *)arg;
650 if (periph == NULL) {
651 printf("adaregister: periph was NULL!!\n");
652 return(CAM_REQ_CMP_ERR);
656 printf("adaregister: no getdev CCB, can't register device\n");
657 return(CAM_REQ_CMP_ERR);
660 softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
664 printf("adaregister: Unable to probe new device. "
665 "Unable to allocate softc\n");
666 return(CAM_REQ_CMP_ERR);
669 bioq_init(&softc->bio_queue);
670 bioq_init(&softc->trim_queue);
/* Translate IDENTIFY capability bits into softc feature flags. */
672 if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA)
673 softc->flags |= ADA_FLAG_CAN_DMA;
674 if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
675 softc->flags |= ADA_FLAG_CAN_48BIT;
676 if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
677 softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
678 if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
679 softc->flags |= ADA_FLAG_CAN_POWERMGT;
/* NCQ needs both drive support and the SIM advertising queueing. */
680 if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
681 cgd->inq_flags & SID_CmdQue)
682 softc->flags |= ADA_FLAG_CAN_NCQ;
683 if (cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) {
684 softc->flags |= ADA_FLAG_CAN_TRIM;
685 softc->trim_max_ranges = TRIM_MAX_RANGES;
686 if (cgd->ident_data.max_dsm_blocks != 0) {
687 softc->trim_max_ranges =
688 min(cgd->ident_data.max_dsm_blocks * 64,
689 softc->trim_max_ranges);
692 if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
693 softc->flags |= ADA_FLAG_CAN_CFA;
694 softc->state = ADA_STATE_NORMAL;
696 periph->softc = softc;
699 * See if this device has any quirks.
701 match = cam_quirkmatch((caddr_t)&cgd->ident_data,
702 (caddr_t)ada_quirk_table,
703 sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
704 sizeof(*ada_quirk_table), ata_identify_match);
706 softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
708 softc->quirks = ADA_Q_NONE;
710 bzero(&cpi, sizeof(cpi));
711 xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
712 cpi.ccb_h.func_code = XPT_PATH_INQ;
713 xpt_action((union ccb *)&cpi);
715 TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
718 * Register this media as a disk
/* disk_create() may sleep, so drop the SIM lock around GEOM setup. */
720 mtx_unlock(periph->sim->mtx);
721 adagetparams(periph, cgd);
722 softc->disk = disk_alloc();
723 softc->disk->d_open = adaopen;
724 softc->disk->d_close = adaclose;
725 softc->disk->d_strategy = adastrategy;
726 softc->disk->d_dump = adadump;
727 softc->disk->d_name = "ada";
728 softc->disk->d_drv1 = periph;
729 maxio = cpi.maxio; /* Honor max I/O size of SIM */
731 maxio = DFLTPHYS; /* traditional default */
732 else if (maxio > MAXPHYS)
733 maxio = MAXPHYS; /* for safety */
/* Cap transfer size by the ATA command's sector-count field width. */
734 if (softc->flags & ADA_FLAG_CAN_48BIT)
735 maxio = min(maxio, 65536 * softc->params.secsize);
736 else /* 28bit ATA command limit */
737 maxio = min(maxio, 256 * softc->params.secsize);
738 softc->disk->d_maxsize = maxio;
739 softc->disk->d_unit = periph->unit_number;
740 softc->disk->d_flags = 0;
741 if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
742 softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
/* BIO_DELETE works via TRIM, or via CFA ERASE on non-48bit CFA media. */
743 if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
744 ((softc->flags & ADA_FLAG_CAN_CFA) &&
745 !(softc->flags & ADA_FLAG_CAN_48BIT)))
746 softc->disk->d_flags |= DISKFLAG_CANDELETE;
747 strlcpy(softc->disk->d_ident, cgd->serial_num,
748 MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));
750 softc->disk->d_sectorsize = softc->params.secsize;
751 softc->disk->d_mediasize = (off_t)softc->params.sectors *
752 softc->params.secsize;
/* Expose physical sector size/alignment for 4Kn/512e drives. */
753 if (ata_physical_sector_size(&cgd->ident_data) !=
754 softc->params.secsize) {
755 softc->disk->d_stripesize =
756 ata_physical_sector_size(&cgd->ident_data);
757 softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
758 ata_logical_sector_offset(&cgd->ident_data)) %
759 softc->disk->d_stripesize;
761 softc->disk->d_fwsectors = softc->params.secs_per_track;
762 softc->disk->d_fwheads = softc->params.heads;
763 ata_disk_firmware_geom_adjust(softc->disk);
765 disk_create(softc->disk, DISK_VERSION);
766 mtx_lock(periph->sim->mtx);
769 snprintf(announce_buf, sizeof(announce_buf),
770 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
771 (uintmax_t)(((uintmax_t)dp->secsize *
772 dp->sectors) / (1024*1024)),
773 (uintmax_t)dp->sectors,
774 dp->secsize, dp->heads,
775 dp->secs_per_track, dp->cylinders);
776 xpt_announce_periph(periph, announce_buf);
778 * Add async callbacks for bus reset and
779 * bus device reset calls. I don't bother
780 * checking if this fails as, in most cases,
781 * the system will function just fine without
782 * them and the only alternative would be to
783 * not attach the device on failure.
785 xpt_register_async(AC_LOST_DEVICE,
786 adaasync, periph, periph->path);
789 * Schedule a periodic event to occasionally send an
790 * ordered tag to a device.
792 callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
793 callout_reset(&softc->sendordered_c,
794 (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
795 adasendorderedtag, softc);
/*
 * Periph start routine: called by XPT with a CCB to fill.  Priority order:
 * immediate (waiting) CCBs, then a coalesced TRIM built from trim_queue,
 * then a regular bio from bio_queue translated into an ATA read/write/
 * erase/flush command.  Runs with the SIM lock held.
 */
801 adastart(struct cam_periph *periph, union ccb *start_ccb)
803 struct ada_softc *softc = (struct ada_softc *)periph->softc;
804 struct ccb_ataio *ataio = &start_ccb->ataio;
806 switch (softc->state) {
807 case ADA_STATE_NORMAL:
812 /* Execute immediate CCB if waiting. */
813 if (periph->immediate_priority <= periph->pinfo.priority) {
814 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
815 ("queuing for immediate ccb\n"));
816 start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
817 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
819 periph->immediate_priority = CAM_PRIORITY_NONE;
820 wakeup(&periph->ccb_list);
821 /* Have more work to do, so ensure we stay scheduled */
825 /* Run TRIM if not running yet. */
826 if (!softc->trim_running &&
827 (bp = bioq_first(&softc->trim_queue)) != 0) {
828 struct trim_request *req = &softc->trim_req;
830 int bps = 0, ranges = 0;
832 softc->trim_running = 1;
833 bzero(req, sizeof(*req));
/* Coalesce queued BIO_DELETEs into one DSM TRIM payload. */
836 uint64_t lba = bp1->bio_pblkno;
837 int count = bp1->bio_bcount /
838 softc->params.secsize;
840 bioq_remove(&softc->trim_queue, bp1);
/* Each 8-byte range entry holds a 48-bit LBA + 16-bit count. */
842 int c = min(count, 0xffff);
843 int off = ranges * 8;
845 req->data[off + 0] = lba & 0xff;
846 req->data[off + 1] = (lba >> 8) & 0xff;
847 req->data[off + 2] = (lba >> 16) & 0xff;
848 req->data[off + 3] = (lba >> 24) & 0xff;
849 req->data[off + 4] = (lba >> 32) & 0xff;
850 req->data[off + 5] = (lba >> 40) & 0xff;
851 req->data[off + 6] = c & 0xff;
852 req->data[off + 7] = (c >> 8) & 0xff;
857 req->bps[bps++] = bp1;
858 bp1 = bioq_first(&softc->trim_queue);
/* Stop coalescing when the next bio wouldn't fit in the remaining ranges. */
860 bp1->bio_bcount / softc->params.secsize >
861 (softc->trim_max_ranges - ranges) * 0xffff)
864 cam_fill_ataio(ataio,
/* Payload is padded up to whole 512-byte DSM blocks. */
870 ((ranges + 63) / 64) * 512,
871 ada_default_timeout * 1000);
872 ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
873 ATA_DSM_TRIM, 0, (ranges + 63) / 64);
874 start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
877 /* Run regular command. */
878 bp = bioq_first(&softc->bio_queue);
880 xpt_release_ccb(start_ccb);
883 bioq_remove(&softc->bio_queue, bp);
/* Periodically inject an ordered tag to avoid simple-tag starvation. */
885 if ((bp->bio_flags & BIO_ORDERED) != 0
886 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
887 softc->flags &= ~ADA_FLAG_NEED_OTAG;
888 softc->ordered_tag_count++;
893 switch (bp->bio_cmd) {
897 uint64_t lba = bp->bio_pblkno;
898 uint16_t count = bp->bio_bcount / softc->params.secsize;
900 cam_fill_ataio(ataio,
903 bp->bio_cmd == BIO_READ ?
904 CAM_DIR_IN : CAM_DIR_OUT,
908 ada_default_timeout*1000);
/* Command selection: NCQ > 48-bit (DMA or PIO-mult) > 28-bit. */
910 if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
911 if (bp->bio_cmd == BIO_READ) {
912 ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
915 ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
918 } else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
919 (lba + count >= ATA_MAX_28BIT_LBA ||
921 if (softc->flags & ADA_FLAG_CAN_DMA) {
922 if (bp->bio_cmd == BIO_READ) {
923 ata_48bit_cmd(ataio, ATA_READ_DMA48,
926 ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
930 if (bp->bio_cmd == BIO_READ) {
931 ata_48bit_cmd(ataio, ATA_READ_MUL48,
934 ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
941 if (softc->flags & ADA_FLAG_CAN_DMA) {
942 if (bp->bio_cmd == BIO_READ) {
943 ata_28bit_cmd(ataio, ATA_READ_DMA,
946 ata_28bit_cmd(ataio, ATA_WRITE_DMA,
950 if (bp->bio_cmd == BIO_READ) {
951 ata_28bit_cmd(ataio, ATA_READ_MUL,
954 ata_28bit_cmd(ataio, ATA_WRITE_MUL,
/* BIO_DELETE fallback for CFA media: sector erase. */
963 uint64_t lba = bp->bio_pblkno;
964 uint16_t count = bp->bio_bcount / softc->params.secsize;
966 cam_fill_ataio(ataio,
973 ada_default_timeout*1000);
977 ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
/* BIO_FLUSH: issue FLUSH CACHE (48-bit flavor when supported). */
981 cam_fill_ataio(ataio,
988 ada_default_timeout*1000);
990 if (softc->flags & ADA_FLAG_CAN_48BIT)
991 ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
993 ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
996 start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
998 start_ccb->ccb_h.ccb_bp = bp;
999 softc->outstanding_cmds++;
1000 xpt_action(start_ccb);
1002 /* May have more work to do, so ensure we stay scheduled */
1003 adaschedule(periph);
/*
 * Command completion callback.  For buffer I/O / TRIM CCBs: run error
 * recovery (possibly invalidating the pack on ENXIO), propagate resid/
 * error to the bio(s), and reschedule.  A completed TRIM fans its status
 * out to every coalesced bio recorded in trim_req.bps[].
 */
1010 adadone(struct cam_periph *periph, union ccb *done_ccb)
1012 struct ada_softc *softc;
1013 struct ccb_ataio *ataio;
1015 softc = (struct ada_softc *)periph->softc;
1016 ataio = &done_ccb->ataio;
1017 switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
1018 case ADA_CCB_BUFFER_IO:
1023 bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1024 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1027 error = adaerror(done_ccb, 0, 0);
1028 if (error == ERESTART) {
1029 /* A retry was scheduled, so just return. */
1033 if (error == ENXIO) {
1035 * Catastrophic error. Mark our pack as
1039 * XXX See if this is really a media
1042 xpt_print(periph->path,
1043 "Invalidating pack\n");
1044 softc->flags |= ADA_FLAG_PACK_INVALID;
1046 bp->bio_error = error;
1047 bp->bio_resid = bp->bio_bcount;
1048 bp->bio_flags |= BIO_ERROR;
1050 bp->bio_resid = ataio->resid;
1052 if (bp->bio_resid != 0)
1053 bp->bio_flags |= BIO_ERROR;
/* Thaw the device queue if the error path froze it. */
1055 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1056 cam_release_devq(done_ccb->ccb_h.path,
1060 /*getcount_only*/0);
1062 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1063 panic("REQ_CMP with QFRZN");
1064 bp->bio_resid = ataio->resid;
1065 if (ataio->resid > 0)
1066 bp->bio_flags |= BIO_ERROR;
1068 softc->outstanding_cmds--;
1069 if (softc->outstanding_cmds == 0)
1070 softc->flags |= ADA_FLAG_WENT_IDLE;
/* TRIM completion: mirror status into each coalesced bio. */
1071 if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
1073 struct trim_request *req =
1074 (struct trim_request *)ataio->data_ptr;
1077 for (i = 1; i < softc->trim_max_ranges &&
1079 struct bio *bp1 = req->bps[i];
1081 bp1->bio_resid = bp->bio_resid;
1082 bp1->bio_error = bp->bio_error;
1083 if (bp->bio_flags & BIO_ERROR)
1084 bp1->bio_flags |= BIO_ERROR;
1087 softc->trim_running = 0;
1089 adaschedule(periph);
1094 case ADA_CCB_WAITING:
1096 /* Caller will release the CCB */
1097 wakeup(&done_ccb->ccb_h.cbfcnp);
1101 /* No-op. We're polling */
1106 xpt_release_ccb(done_ccb);
/*
 * Error-recovery hook: defer to the generic CAM periph error handler.
 * (softc is looked up but — in the visible lines — not otherwise used.)
 */
1110 adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1112 struct ada_softc *softc;
1113 struct cam_periph *periph;
1115 periph = xpt_path_periph(ccb->ccb_h.path);
1116 softc = (struct ada_softc *)periph->softc;
1118 return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
/*
 * Derive disk geometry/capacity from IDENTIFY data, in increasing order
 * of trust: current CHS values (if valid), default CHS, then the 28-bit
 * LBA size, and finally the 48-bit LBA size when supported and larger
 * than the 28-bit maximum.
 */
1122 adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
1124 struct ada_softc *softc = (struct ada_softc *)periph->softc;
1125 struct disk_params *dp = &softc->params;
1126 u_int64_t lbasize48;
1129 dp->secsize = ata_logical_sector_size(&cgd->ident_data);
1130 if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1131 cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1132 dp->heads = cgd->ident_data.current_heads;
1133 dp->secs_per_track = cgd->ident_data.current_sectors;
1134 dp->cylinders = cgd->ident_data.cylinders;
1135 dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1136 ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1138 dp->heads = cgd->ident_data.heads;
1139 dp->secs_per_track = cgd->ident_data.sectors;
1140 dp->cylinders = cgd->ident_data.cylinders;
1141 dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
1143 lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1144 ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1146 /* use the 28bit LBA size if valid or bigger than the CHS mapping */
1147 if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
1148 dp->sectors = lbasize;
1150 /* use the 48bit LBA size if valid */
1151 lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1152 ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1153 ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1154 ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1155 if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1156 lbasize48 > ATA_MAX_28BIT_LBA)
1157 dp->sectors = lbasize48;
/*
 * Periodic callout (every timeout/ADA_ORDEREDTAG_INTERVAL): if no ordered
 * tag was issued in the last interval and the device hasn't gone idle,
 * request one via ADA_FLAG_NEED_OTAG to prevent simple-tag starvation.
 */
1161 adasendorderedtag(void *arg)
1163 struct ada_softc *softc = arg;
1165 if (ada_send_ordered) {
1166 if ((softc->ordered_tag_count == 0)
1167 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1168 softc->flags |= ADA_FLAG_NEED_OTAG;
1170 if (softc->outstanding_cmds > 0)
1171 softc->flags &= ~ADA_FLAG_WENT_IDLE;
1173 softc->ordered_tag_count = 0;
1175 /* Queue us up again */
1176 callout_reset(&softc->sendordered_c,
1177 (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
1178 adasendorderedtag, softc);
1182 * Step through all ADA peripheral drivers, and if the device is still open,
1183 * sync the disk cache to physical media.
/*
 * shutdown_post_sync event handler: first pass flushes the write cache of
 * every open, flush-capable unit; second pass (only on halt/poweroff with
 * spindown enabled) issues STANDBY IMMEDIATE to power-management-capable
 * units.  Uses polled CCBs since interrupts may be unreliable at shutdown.
 */
1186 adashutdown(void * arg, int howto)
1188 struct cam_periph *periph;
1189 struct ada_softc *softc;
1191 TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1194 /* If we paniced with lock held - not recurse here. */
1195 if (cam_periph_owned(periph))
1197 cam_periph_lock(periph);
1198 softc = (struct ada_softc *)periph->softc;
1200 * We only sync the cache if the drive is still open, and
1201 * if the drive is capable of it..
1203 if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
1204 (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
1205 cam_periph_unlock(periph);
1209 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1211 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1212 cam_fill_ataio(&ccb.ataio,
1219 ada_default_timeout*1000);
1221 if (softc->flags & ADA_FLAG_CAN_48BIT)
1222 ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1224 ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
1225 xpt_polled_action(&ccb);
1227 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1228 xpt_print(periph->path, "Synchronize cache failed\n");
1230 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1231 cam_release_devq(ccb.ccb_h.path,
1235 /*getcount_only*/0);
1236 cam_periph_unlock(periph);
/* Spin-down pass: skipped unless halting/powering off with the knob set. */
1239 if (ada_spindown_shutdown == 0 ||
1240 (howto & (RB_HALT | RB_POWEROFF)) == 0)
1243 TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
1246 /* If we paniced with lock held - not recurse here. */
1247 if (cam_periph_owned(periph))
1249 cam_periph_lock(periph);
1250 softc = (struct ada_softc *)periph->softc;
1252 * We only spin-down the drive if it is capable of it..
1254 if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1255 cam_periph_unlock(periph);
1260 xpt_print(periph->path, "spin-down\n");
1262 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1264 ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
1265 cam_fill_ataio(&ccb.ataio,
1272 ada_default_timeout*1000);
1274 ata_28bit_cmd(&ccb.ataio, ATA_STANDBY_IMMEDIATE, 0, 0, 0);
1275 xpt_polled_action(&ccb);
1277 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1278 xpt_print(periph->path, "Spin-down disk failed\n");
1280 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1281 cam_release_devq(ccb.ccb_h.path,
1285 /*getcount_only*/0);
1286 cam_periph_unlock(periph);
1290 #endif /* _KERNEL */