2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
4 * Copyright (c) 1997 Justin T. Gibbs.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include "opt_hw_wdog.h"
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
37 #include <sys/devicestat.h>
38 #include <sys/dkbad.h>
39 #include <sys/disklabel.h>
40 #include <sys/diskslice.h>
41 #include <sys/eventhandler.h>
42 #include <sys/malloc.h>
46 #include <machine/md_var.h>
49 #include <vm/vm_prot.h>
53 #include <cam/cam_ccb.h>
54 #include <cam/cam_extend.h>
55 #include <cam/cam_periph.h>
56 #include <cam/cam_xpt_periph.h>
58 #include <cam/scsi/scsi_message.h>
/*
 * Per-instance state flags kept in the softc (bit values).
 * NOTE(review): the enclosing enum declarations are not visible in this
 * extract; these are the enumerator values only.
 */
66 DA_FLAG_PACK_INVALID = 0x001, /* cached media (pack) info is stale; reject I/O */
67 DA_FLAG_NEW_PACK = 0x002,
68 DA_FLAG_PACK_LOCKED = 0x004, /* media ejection prevented via PREVENT/ALLOW */
69 DA_FLAG_PACK_REMOVABLE = 0x008, /* device reports removable media */
70 DA_FLAG_TAGGED_QUEUING = 0x010, /* device supports command queuing (SID_CmdQue) */
71 DA_FLAG_NEED_OTAG = 0x020, /* send an ordered tag on next I/O (anti-starvation) */
72 DA_FLAG_WENT_IDLE = 0x040, /* no transactions outstanding since last poll */
73 DA_FLAG_RETRY_UA = 0x080, /* retry expected UNIT ATTENTION on pending CCBs */
/* Quirk flags (DA_Q_*). */
79 DA_Q_NO_SYNC_CACHE = 0x01, /* device chokes on SYNCHRONIZE CACHE */
/*
 * CCB state values stored in the ccb_state private field; low nibble is
 * the type, high bits are modifiers.
 */
85 DA_CCB_BUFFER_IO = 0x02, /* normal buf-driven read/write */
86 DA_CCB_WAITING = 0x03, /* immediate CCB handed to a sleeping waiter */
88 DA_CCB_TYPE_MASK = 0x0F,
89 DA_CCB_RETRY_UA = 0x10 /* modifier: retry on UNIT ATTENTION */
92 /* Offsets into our private area for storing information */
93 #define ccb_state ppriv_field0
94 #define ccb_bp ppriv_ptr1
/*
 * Geometry/capacity parameters cached from READ CAPACITY and the
 * controller's XPT_CALC_GEOMETRY answer (struct disk_params fragment).
 */
99 u_int8_t secs_per_track;
100 u_int32_t secsize; /* Number of bytes/sector */
101 u_int32_t sectors; /* total number sectors */
/*
 * Per-unit softc fragment.  NOTE(review): the struct header and some
 * members are missing from this extract.
 */
105 struct buf_queue_head buf_queue; /* pending buf I/O, disksort order */
106 struct devstat device_stats; /* devstat(9) accounting for this unit */
107 SLIST_ENTRY(da_softc) links; /* membership in global softc_list */
108 LIST_HEAD(, ccb_hdr) pending_ccbs; /* CCBs issued but not yet completed */
112 int minimum_cmd_size; /* 6 or 10 byte CDBs (see DA_Q_NO_6_BYTE) */
113 int ordered_tag_count; /* ordered tags sent this interval */
114 struct disk_params params;
115 struct diskslices *dk_slices; /* virtual drives */
/*
 * Quirk table: inquiry patterns matched against newly-found devices
 * (via cam_quirkmatch() in daregister) to select workaround flags.
 */
119 struct da_quirk_entry {
120 struct scsi_inquiry_pattern inq_pat;
124 static struct da_quirk_entry da_quirk_table[] =
128 * This particular Fujitsu drive doesn't like the
129 * synchronize cache command.
130 * Reported by: Tom Jackson <toj@gorilla.net>
132 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
133 /*quirks*/ DA_Q_NO_SYNC_CACHE
138 * This drive doesn't like the synchronize cache command
139 * either. Reported by: Matthew Jacob <mjacob@feral.com>
140 * in NetBSD PR kern/6027, August 24, 1998.
142 {T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2217*", "*"},
143 /*quirks*/ DA_Q_NO_SYNC_CACHE
147 * This drive doesn't like the synchronize cache command
148 * either. Reported by: Hellmuth Michaelis (hm@kts.org)
151 {T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2112*", "*"},
152 /*quirks*/ DA_Q_NO_SYNC_CACHE
156 * Doesn't like the synchronize cache command.
157 * Reported by: Blaz Zupan <blaz@gold.amis.net>
159 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
160 /*quirks*/ DA_Q_NO_SYNC_CACHE
164 * Doesn't work correctly with 6 byte reads/writes.
165 * Returns illegal request, and points to byte 9 of the
167 * Reported by: Adam McDougall <bsdx@spawnet.com>
169 {T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 4*", "*"},
170 /*quirks*/ DA_Q_NO_6_BYTE
/* Same 6-byte CDB limitation as the Viking 4 above. */
176 {T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 2*", "*"},
177 /*quirks*/ DA_Q_NO_6_BYTE
/* devsw entry points (block/character device interface). */
181 static d_open_t daopen;
182 static d_close_t daclose;
183 static d_strategy_t dastrategy;
184 static d_ioctl_t daioctl;
185 static d_dump_t dadump;
186 static d_psize_t dasize;
/* CAM peripheral driver entry points. */
187 static periph_init_t dainit;
188 static void daasync(void *callback_arg, u_int32_t code,
189 struct cam_path *path, void *arg);
190 static periph_ctor_t daregister;
191 static periph_dtor_t dacleanup;
192 static periph_start_t dastart;
193 static periph_oninv_t daoninvalidate;
194 static void dadone(struct cam_periph *periph,
195 union ccb *done_ccb);
196 static int daerror(union ccb *ccb, u_int32_t cam_flags,
197 u_int32_t sense_flags);
198 static void daprevent(struct cam_periph *periph, int action);
199 static void dasetgeom(struct cam_periph *periph,
200 struct scsi_read_capacity_data * rdcap);
201 static timeout_t dasendorderedtag;
202 static void dashutdown(void *arg, int howto);
204 #ifndef DA_DEFAULT_TIMEOUT
205 #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
209 * DA_ORDEREDTAG_INTERVAL determines how often, relative
210 * to the default timeout, we check to see whether an ordered
211 * tagged transaction is appropriate to prevent simple tag
212 * starvation. Since we'd like to ensure that there is at least
213 * 1/2 of the timeout length left for a starved transaction to
214 * complete after we've sent an ordered tag, we must poll at least
215 * four times in every timeout period. This takes care of the worst
216 * case where a starved transaction starts during an interval that
217 * meets the requirement "don't send an ordered tag" test so it takes
218 * us two intervals to determine that a tag must be sent.
220 #ifndef DA_ORDEREDTAG_INTERVAL
221 #define DA_ORDEREDTAG_INTERVAL 4
/* Peripheral driver registration record for the CAM layer. */
224 static struct periph_driver dadriver =
227 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
230 DATA_SET(periphdriver_set, dadriver);
232 #define DA_CDEV_MAJOR 13
233 #define DA_BDEV_MAJOR 4
235 /* For 2.2-stable support */
/* Character device switch table registered by dainit(). */
240 static struct cdevsw da_cdevsw = {
244 /* write */ physwrite,
248 /* strategy */ dastrategy,
250 /* maj */ DA_CDEV_MAJOR,
254 /* bmaj */ DA_BDEV_MAJOR
/* Global list of attached units and unit-number -> periph map. */
257 static SLIST_HEAD(,da_softc) softc_list;
258 static struct extend_array *daperiphs;
/*
 * daopen -- open entry point.
 *
 * Looks up the periph for this unit, takes the periph lock, refreshes
 * capacity with READ CAPACITY, builds a disklabel from inquiry data and
 * cached geometry, and opens the slice layer.  Returns 0 or an errno.
 * NOTE(review): this extract is missing interior lines of the original
 * function (declarations, error paths, closing braces).
 */
261 daopen(dev_t dev, int flags, int fmt, struct proc *p)
263 struct cam_periph *periph;
264 struct da_softc *softc;
265 struct disklabel label;
273 periph = cam_extend_get(daperiphs, unit);
277 softc = (struct da_softc *)periph->softc;
279 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
280 ("daopen: dev=%s (unit %d , partition %d)\n", devtoname(dev),
283 if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
284 return (error); /* error code from tsleep */
/* First open of this unit: take a reference on the periph. */
287 if ((softc->flags & DA_FLAG_OPEN) == 0) {
288 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
290 softc->flags |= DA_FLAG_OPEN;
294 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
296 * If any partition is open, although the disk has
297 * been invalidated, disallow further opens.
299 if (dsisopen(softc->dk_slices)) {
301 cam_periph_unlock(periph);
305 /* Invalidate our pack information. */
306 dsgone(&softc->dk_slices);
307 softc->flags &= ~DA_FLAG_PACK_INVALID;
311 /* Do a read capacity */
313 struct scsi_read_capacity_data *rcap;
316 rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
320 ccb = cam_periph_getccb(periph, /*priority*/1);
321 scsi_read_capacity(&ccb->csio,
328 ccb->ccb_h.ccb_bp = NULL;
330 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
331 /*sense_flags*/SF_RETRY_UA |
333 &softc->device_stats);
335 xpt_release_ccb(ccb);
/* Cache the new size/geometry in softc->params. */
338 dasetgeom(periph, rcap);
345 struct ccb_getdev cgd;
347 /* Build label for whole disk. */
348 bzero(&label, sizeof(label));
349 label.d_type = DTYPE_SCSI;
352 * Grab the inquiry data to get the vendor and product names.
353 * Put them in the typename and packname for the label.
355 xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
356 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
357 xpt_action((union ccb *)&cgd);
359 strncpy(label.d_typename, cgd.inq_data.vendor,
360 min(SID_VENDOR_SIZE, sizeof(label.d_typename)));
361 strncpy(label.d_packname, cgd.inq_data.product,
362 min(SID_PRODUCT_SIZE, sizeof(label.d_packname)));
364 label.d_secsize = softc->params.secsize;
365 label.d_nsectors = softc->params.secs_per_track;
366 label.d_ntracks = softc->params.heads;
367 label.d_ncylinders = softc->params.cylinders;
368 label.d_secpercyl = softc->params.heads
369 * softc->params.secs_per_track;
370 label.d_secperunit = softc->params.sectors;
/* Lock removable media in the drive while any partition is open. */
372 if ((dsisopen(softc->dk_slices) == 0)
373 && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
374 daprevent(periph, PR_PREVENT);
377 /* Initialize slice tables. */
378 error = dsopen(dev, fmt, 0, &softc->dk_slices, &label);
381 * Check to see whether or not the blocksize is set yet.
382 * If it isn't, set it and then clear the blocksize
383 * unavailable flag for the device statistics.
385 if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
386 softc->device_stats.block_size = softc->params.secsize;
387 softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
/* Open failed and nothing else is open: allow media removal again. */
392 if ((dsisopen(softc->dk_slices) == 0)
393 && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
394 daprevent(periph, PR_ALLOW);
397 cam_periph_unlock(periph);
/*
 * daclose -- close entry point.
 *
 * Closes the slice layer; on last close, flushes the drive's write cache
 * (unless quirked with DA_Q_NO_SYNC_CACHE), re-allows media removal, and
 * drops the open flag and periph reference.  NOTE(review): interior
 * lines of the original function are missing from this extract.
 */
402 daclose(dev_t dev, int flag, int fmt, struct proc *p)
404 struct cam_periph *periph;
405 struct da_softc *softc;
410 periph = cam_extend_get(daperiphs, unit);
414 softc = (struct da_softc *)periph->softc;
416 if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
417 return (error); /* error code from tsleep */
420 dsclose(dev, fmt, softc->dk_slices);
/* Other partitions still open: nothing more to do. */
421 if (dsisopen(softc->dk_slices)) {
422 cam_periph_unlock(periph);
/* Last close: push any cached writes out to the media. */
426 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
429 ccb = cam_periph_getccb(periph, /*priority*/1);
431 scsi_synchronize_cache(&ccb->csio,
435 /*begin_lba*/0,/* Cover the whole disk */
440 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
441 /*sense_flags*/SF_RETRY_UA,
442 &softc->device_stats);
444 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
445 if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
446 CAM_SCSI_STATUS_ERROR) {
448 int sense_key, error_code;
450 scsi_extract_sense(&ccb->csio.sense_data,
/* ILLEGAL REQUEST likely means the drive lacks the command; stay quiet. */
454 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
455 scsi_sense_print(&ccb->csio);
457 xpt_print_path(periph->path);
458 printf("Synchronize cache failed, status "
459 "== 0x%x, scsi status == 0x%x\n",
460 ccb->csio.ccb_h.status,
461 ccb->csio.scsi_status);
/* Release any device queue freeze left by the failed command. */
465 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
466 cam_release_devq(ccb->ccb_h.path,
472 xpt_release_ccb(ccb);
476 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
477 daprevent(periph, PR_ALLOW);
479 * If we've got removable media, mark the blocksize as
480 * unavailable, since it could change when new media is
483 softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
486 softc->flags &= ~DA_FLAG_OPEN;
487 cam_periph_unlock(periph);
488 cam_periph_release(periph);
493 * Actually translate the requested transfer into one the physical driver
494 * can understand. The transfer is described by a buf and will include
495 * only one physical transfer.
498 dastrategy(struct buf *bp)
500 struct cam_periph *periph;
501 struct da_softc *softc;
506 unit = dkunit(bp->b_dev);
507 part = dkpart(bp->b_dev);
508 periph = cam_extend_get(daperiphs, unit);
509 if (periph == NULL) {
513 softc = (struct da_softc *)periph->softc;
516 * check it's not too big a transfer for our adapter
518 scsi_minphys(bp,&sd_switch);
522 * Do bounds checking, adjust transfer, set b_cylin and b_pbklno.
524 if (dscheck(bp, softc->dk_slices) <= 0)
528 * Mask interrupts so that the pack cannot be invalidated until
529 * after we are in the queue. Otherwise, we might not properly
530 * clean up one of the buffers.
535 * If the device has been made invalid, error out
537 if ((softc->flags & DA_FLAG_PACK_INVALID)) {
544 * Place it in the queue of disk activities for this disk
546 bufqdisksort(&softc->buf_queue, bp);
551 * Schedule ourselves for performing the work.
553 xpt_schedule(periph, /* XXX priority */1);
/* Error exit: fail the buf without queueing it. */
557 bp->b_flags |= B_ERROR;
561 * Correctly set the buf to indicate a completed xfer
563 bp->b_resid = bp->b_bcount;
568 /* For 2.2-stable support */
/*
 * daioctl -- ioctl entry point.  Dispatches to the slice layer first;
 * anything it does not recognize (ENOIOCTL) falls through to the generic
 * CAM peripheral ioctl handler.
 */
574 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
576 struct cam_periph *periph;
577 struct da_softc *softc;
582 periph = cam_extend_get(daperiphs, unit);
586 softc = (struct da_softc *)periph->softc;
588 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));
591 return (EINVAL); /* XXX */
593 if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
594 return (error); /* error code from tsleep */
597 error = dsioctl(dev, cmd, addr, flag, &softc->dk_slices);
599 if (error == ENOIOCTL)
600 error = cam_periph_ioctl(periph, cmd, addr, daerror);
602 cam_periph_unlock(periph);
/*
 * dadump -- kernel crash-dump entry point (d_dump).
 *
 * Writes physical memory to the dump partition a page at a time using
 * polled CCBs (interrupts are unusable during a panic), then flushes the
 * drive cache.  NOTE(review): the function header and several interior
 * lines are missing from this extract.
 */
610 struct cam_periph *periph;
611 struct da_softc *softc;
612 struct disklabel *lp;
615 long num; /* number of sectors to write */
620 static int dadoingadump = 0;
621 struct ccb_scsiio csio;
623 /* toss any characters present prior to dump */
624 while (cncheckc() != -1)
629 periph = cam_extend_get(daperiphs, unit);
630 if (periph == NULL) {
633 softc = (struct da_softc *)periph->softc;
/* Refuse to dump to an invalidated or unlabeled device. */
635 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0
636 || (softc->dk_slices == NULL)
637 || (lp = dsgetlabel(dev, softc->dk_slices)) == NULL)
640 /* Size of memory to dump, in disk sectors. */
641 /* XXX Fix up for non DEV_BSIZE sectors!!! */
642 num = (u_long)Maxmem * PAGE_SIZE / softc->params.secsize;
644 blkoff = lp->d_partitions[part].p_offset;
645 blkoff += softc->dk_slices->dss_slices[dkslice(dev)].ds_offset;
647 /* check transfer bounds against partition size */
648 if ((dumplo < 0) || ((dumplo + num) > lp->d_partitions[part].p_size))
651 if (dadoingadump != 0)
656 blknum = dumplo + blkoff;
657 blkcnt = PAGE_SIZE / softc->params.secsize;
659 addr = 0; /* starting address */
/* Map the next physical page at CADDR1 so we can DMA from it. */
663 if (is_physical_memory(addr)) {
664 pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
665 trunc_page(addr), VM_PROT_READ, TRUE);
667 pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
668 trunc_page(0), VM_PROT_READ, TRUE);
671 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
672 csio.ccb_h.ccb_state = DA_CCB_DUMP;
673 scsi_read_write(&csio,
679 /*minimum_cmd_size*/ softc->minimum_cmd_size,
683 /*dxfer_len*/blkcnt * softc->params.secsize,
684 /*sense_len*/SSD_FULL_SIZE,
685 DA_DEFAULT_TIMEOUT * 1000);
686 xpt_polled_action((union ccb *)&csio);
688 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
689 printf("Aborting dump due to I/O error.\n");
690 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
691 CAM_SCSI_STATUS_ERROR)
692 scsi_sense_print(&csio);
694 printf("status == 0x%x, scsi status == 0x%x\n",
695 csio.ccb_h.status, csio.scsi_status);
/* Progress indicator: print remaining megabytes every 1MB written. */
699 if (addr % (1024 * 1024) == 0) {
704 /* Count in MB of data left to write */
705 printf("%ld ", (num * softc->params.secsize)
709 /* update block count */
712 addr += blkcnt * softc->params.secsize;
714 /* operator aborting dump? */
715 if (cncheckc() != -1)
720 * Sync the disk cache contents to the physical media.
722 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
724 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
725 csio.ccb_h.ccb_state = DA_CCB_DUMP;
726 scsi_synchronize_cache(&csio,
730 /*begin_lba*/0,/* Cover the whole disk */
734 xpt_polled_action((union ccb *)&csio);
736 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
737 if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
738 CAM_SCSI_STATUS_ERROR) {
740 int sense_key, error_code;
742 scsi_extract_sense(&csio.sense_data,
746 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
747 scsi_sense_print(&csio);
749 xpt_print_path(periph->path);
750 printf("Synchronize cache failed, status "
751 "== 0x%x, scsi status == 0x%x\n",
752 csio.ccb_h.status, csio.scsi_status);
/*
 * dasize -- d_psize entry point: report the size of the partition via
 * the slice layer.  NOTE(review): the function header line is missing
 * from this extract.
 */
762 struct cam_periph *periph;
763 struct da_softc *softc;
765 periph = cam_extend_get(daperiphs, dkunit(dev));
769 softc = (struct da_softc *)periph->softc;
771 return (dssize(dev, &softc->dk_slices));
/*
 * dainit -- one-time driver initialization (periph_init).
 *
 * Allocates the unit map, registers the AC_FOUND_DEVICE async callback
 * so daasync() sees new devices, installs the cdevsw, arms the periodic
 * ordered-tag timeout, and registers the shutdown cache-sync handler.
 */
778 struct cam_path *path;
781 * Create our extend array for storing the devices we attach to.
783 daperiphs = cam_extend_new();
784 SLIST_INIT(&softc_list);
785 if (daperiphs == NULL) {
786 printf("da: Failed to alloc extend array!\n");
791 * Install a global async callback. This callback will
792 * receive async callbacks like "new device found".
794 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
795 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
797 if (status == CAM_REQ_CMP) {
798 struct ccb_setasync csa;
800 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
801 csa.ccb_h.func_code = XPT_SASYNC_CB;
802 csa.event_enable = AC_FOUND_DEVICE;
803 csa.callback = daasync;
804 csa.callback_arg = NULL;
805 xpt_action((union ccb *)&csa);
806 status = csa.ccb_h.status;
810 if (status != CAM_REQ_CMP) {
811 printf("da: Failed to attach master async callback "
812 "due to status 0x%x!\n", status);
815 /* If we were successful, register our devsw */
816 cdevsw_add(&da_cdevsw);
819 * Schedule a periodic event to occasionally send an
820 * ordered tag to a device.
822 timeout(dasendorderedtag, NULL,
823 (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
825 /* Register our shutdown event handler */
826 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
827 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
828 printf("dainit: shutdown event registration failed!\n");
/*
 * daoninvalidate -- called when the device goes away (periph_oninv).
 * De-registers our async callbacks, marks the pack invalid, and fails
 * all queued bufs with ENXIO.
 */
833 daoninvalidate(struct cam_periph *periph)
836 struct da_softc *softc;
838 struct ccb_setasync csa;
840 softc = (struct da_softc *)periph->softc;
843 * De-register any async callbacks.
845 xpt_setup_ccb(&csa.ccb_h, periph->path,
847 csa.ccb_h.func_code = XPT_SASYNC_CB;
848 csa.event_enable = 0;
849 csa.callback = daasync;
850 csa.callback_arg = periph;
851 xpt_action((union ccb *)&csa);
853 softc->flags |= DA_FLAG_PACK_INVALID;
856 * Although the oninvalidate() routines are always called at
857 * splsoftcam, we need to be at splbio() here to keep the buffer
858 * queue from being modified while we traverse it.
863 * Return all queued I/O with ENXIO.
864 * XXX Handle any transactions queued to the card
865 * with XPT_ABORT_CCB.
867 while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
868 bufq_remove(&softc->buf_queue, q_bp);
869 q_bp->b_resid = q_bp->b_bcount;
870 q_bp->b_error = ENXIO;
871 q_bp->b_flags |= B_ERROR;
/* Remove us from the global unit list so dasendorderedtag skips us. */
876 SLIST_REMOVE(&softc_list, softc, da_softc, links);
878 xpt_print_path(periph->path);
879 printf("lost device\n");
/*
 * dacleanup -- final destructor (periph_dtor): tear down devstat, the
 * unit-map entry, and free the softc.
 */
883 dacleanup(struct cam_periph *periph)
885 struct da_softc *softc;
887 softc = (struct da_softc *)periph->softc;
889 devstat_remove_entry(&softc->device_stats);
890 cam_extend_release(daperiphs, periph->unit_number);
891 xpt_print_path(periph->path);
892 printf("removing device entry\n");
893 free(softc, M_DEVBUF);
/*
 * daasync -- async event callback.
 *
 * AC_FOUND_DEVICE: attach a new da instance to direct-access/optical
 * devices.  The reset-related case (fragmentary below) marks pending
 * CCBs so the expected UNIT ATTENTION after a reset is retried rather
 * than treated as an error.  Other codes go to cam_periph_async().
 */
897 daasync(void *callback_arg, u_int32_t code,
898 struct cam_path *path, void *arg)
900 struct cam_periph *periph;
902 periph = (struct cam_periph *)callback_arg;
904 case AC_FOUND_DEVICE:
906 struct ccb_getdev *cgd;
909 cgd = (struct ccb_getdev *)arg;
/* Only direct-access and optical devices belong to this driver. */
911 if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
915 * Allocate a peripheral instance for
916 * this device and start the probe
919 status = cam_periph_alloc(daregister, daoninvalidate,
921 "da", CAM_PERIPH_BIO,
922 cgd->ccb_h.path, daasync,
923 AC_FOUND_DEVICE, cgd);
925 if (status != CAM_REQ_CMP
926 && status != CAM_REQ_INPROG)
927 printf("daasync: Unable to attach to new device "
928 "due to status 0x%x\n", status);
934 struct da_softc *softc;
935 struct ccb_hdr *ccbh;
938 softc = (struct da_softc *)periph->softc;
941 * Don't fail on the expected unit attention
944 softc->flags |= DA_FLAG_RETRY_UA;
945 for (ccbh = LIST_FIRST(&softc->pending_ccbs);
946 ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
947 ccbh->ccb_state |= DA_CCB_RETRY_UA;
952 cam_periph_async(periph, code, path, arg);
/*
 * daregister -- peripheral constructor (periph_ctor).
 *
 * Allocates and initializes the softc, applies quirk-table matches,
 * registers devstat and reset/lost-device async callbacks, then locks
 * the periph and schedules the capacity probe (DA_STATE_PROBE ->
 * dastart).  Returns CAM_REQ_CMP on success, CAM_REQ_CMP_ERR otherwise.
 */
958 daregister(struct cam_periph *periph, void *arg)
961 struct da_softc *softc;
962 struct ccb_setasync csa;
963 struct ccb_getdev *cgd;
966 cgd = (struct ccb_getdev *)arg;
967 if (periph == NULL) {
968 printf("daregister: periph was NULL!!\n");
969 return(CAM_REQ_CMP_ERR);
973 printf("daregister: no getdev CCB, can't register device\n");
974 return(CAM_REQ_CMP_ERR);
977 softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);
980 printf("daregister: Unable to probe new device. "
981 "Unable to allocate softc\n");
982 return(CAM_REQ_CMP_ERR);
985 bzero(softc, sizeof(*softc));
986 LIST_INIT(&softc->pending_ccbs);
987 softc->state = DA_STATE_PROBE;
988 bufq_init(&softc->buf_queue);
989 if (SID_IS_REMOVABLE(&cgd->inq_data))
990 softc->flags |= DA_FLAG_PACK_REMOVABLE;
991 if ((cgd->inq_data.flags & SID_CmdQue) != 0)
992 softc->flags |= DA_FLAG_TAGGED_QUEUING;
994 periph->softc = softc;
996 cam_extend_set(daperiphs, periph->unit_number, periph);
999 * See if this device has any quirks.
1001 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
1002 (caddr_t)da_quirk_table,
1003 sizeof(da_quirk_table)/sizeof(*da_quirk_table),
1004 sizeof(*da_quirk_table), scsi_inquiry_match);
1007 softc->quirks = ((struct da_quirk_entry *)match)->quirks;
1009 softc->quirks = DA_Q_NONE;
/* Quirked devices must always use 10-byte CDBs. */
1011 if (softc->quirks & DA_Q_NO_6_BYTE)
1012 softc->minimum_cmd_size = 10;
1014 softc->minimum_cmd_size = 6;
1017 * Block our timeout handler while we
1018 * add this softc to the dev list.
1021 SLIST_INSERT_HEAD(&softc_list, softc, links);
1025 * The DA driver supports a blocksize, but
1026 * we don't know the blocksize until we do
1027 * a read capacity. So, set a flag to
1028 * indicate that the blocksize is
1029 * unavailable right now. We'll clear the
1030 * flag as soon as we've done a read capacity.
1032 devstat_add_entry(&softc->device_stats, "da",
1033 periph->unit_number, 0,
1034 DEVSTAT_BS_UNAVAILABLE,
1035 cgd->pd_type | DEVSTAT_TYPE_IF_SCSI,
1036 DEVSTAT_PRIORITY_DA);
1039 * Add async callbacks for bus reset and
1040 * bus device reset calls. I don't bother
1041 * checking if this fails as, in most cases,
1042 * the system will function just fine without
1043 * them and the only alternative would be to
1044 * not attach the device on failure.
1046 xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
1047 csa.ccb_h.func_code = XPT_SASYNC_CB;
1048 csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
1049 csa.callback = daasync;
1050 csa.callback_arg = periph;
1051 xpt_action((union ccb *)&csa);
1053 * Lock this peripheral until we are setup.
1054 * This first call can't block
1056 (void)cam_periph_lock(periph, PRIBIO);
1057 xpt_schedule(periph, /*priority*/5);
1059 return(CAM_REQ_CMP);
/*
 * dastart -- CCB dispatch routine (periph_start), called by the XPT
 * when a CCB is available.
 *
 * DA_STATE_NORMAL: pull the next buf off the queue, build a READ/WRITE
 * CDB (ordered tag if requested or due), track it on pending_ccbs, and
 * fire it.  DA_STATE_PROBE: issue the initial READ CAPACITY.
 * NOTE(review): interior lines are missing from this extract.
 */
1063 dastart(struct cam_periph *periph, union ccb *start_ccb)
1065 struct da_softc *softc;
1067 softc = (struct da_softc *)periph->softc;
1070 switch (softc->state) {
1071 case DA_STATE_NORMAL:
1073 /* Pull a buffer from the queue and get going on it */
1078 * See if there is a buf with work for us to do..
1081 bp = bufq_first(&softc->buf_queue);
/* An immediate (waiting) request takes priority over buf I/O. */
1082 if (periph->immediate_priority <= periph->pinfo.priority) {
1083 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
1084 ("queuing for immediate ccb\n"));
1085 start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
1086 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1088 periph->immediate_priority = CAM_PRIORITY_NONE;
1090 wakeup(&periph->ccb_list);
1091 } else if (bp == NULL) {
1093 xpt_release_ccb(start_ccb);
1098 bufq_remove(&softc->buf_queue, bp);
1100 devstat_start_transaction(&softc->device_stats);
/* Use an ordered tag when asked for, or to break tag starvation. */
1102 if ((bp->b_flags & B_ORDERED) != 0
1103 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
1104 softc->flags &= ~DA_FLAG_NEED_OTAG;
1105 softc->ordered_tag_count++;
1106 tag_code = MSG_ORDERED_Q_TAG;
1108 tag_code = MSG_SIMPLE_Q_TAG;
1110 scsi_read_write(&start_ccb->csio,
1114 bp->b_flags & B_READ,
1116 softc->minimum_cmd_size,
1118 bp->b_bcount / softc->params.secsize,
1121 /*sense_len*/SSD_FULL_SIZE,
1122 DA_DEFAULT_TIMEOUT * 1000);
1123 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
1126 * Block out any asynchronous callbacks
1127 * while we touch the pending ccb list.
1130 LIST_INSERT_HEAD(&softc->pending_ccbs,
1131 &start_ccb->ccb_h, periph_links.le);
1134 /* We expect a unit attention from this device */
1135 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
1136 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
1137 softc->flags &= ~DA_FLAG_RETRY_UA;
1140 start_ccb->ccb_h.ccb_bp = bp;
1141 bp = bufq_first(&softc->buf_queue);
1144 xpt_action(start_ccb);
1148 /* Have more work to do, so ensure we stay scheduled */
1149 xpt_schedule(periph, /* XXX priority */1);
1153 case DA_STATE_PROBE:
1155 struct ccb_scsiio *csio;
1156 struct scsi_read_capacity_data *rcap;
1158 rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
1162 printf("dastart: Couldn't malloc read_capacity data\n");
1163 /* da_free_periph??? */
1166 csio = &start_ccb->csio;
1167 scsi_read_capacity(csio,
1174 start_ccb->ccb_h.ccb_bp = NULL;
1175 start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
1176 xpt_action(start_ccb);
/*
 * dadone -- CCB completion routine, dispatched on the CCB's type bits.
 *
 * DA_CCB_BUFFER_IO: finish (or fail) the buf, flushing the whole queue
 * with EIO on catastrophic (ENXIO) errors.  DA_CCB_PROBE: digest the
 * READ CAPACITY result, announce the unit, and move to DA_STATE_NORMAL.
 * DA_CCB_WAITING: wake the sleeping requester.  NOTE(review): interior
 * lines are missing from this extract.
 */
1184 dadone(struct cam_periph *periph, union ccb *done_ccb)
1186 struct da_softc *softc;
1187 struct ccb_scsiio *csio;
1189 softc = (struct da_softc *)periph->softc;
1190 csio = &done_ccb->csio;
1191 switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
1192 case DA_CCB_BUFFER_IO:
1197 bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
1198 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
/* Retry the post-reset UNIT ATTENTION if this CCB was flagged for it. */
1203 if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
1208 /* Retry selection timeouts */
1209 sf |= SF_RETRY_SELTO;
1211 if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
1213 * A retry was scheduled, so
1223 if (error == ENXIO) {
1225 * Catastrophic error. Mark our pack as
1228 /* XXX See if this is really a media
1231 xpt_print_path(periph->path);
1232 printf("Invalidating pack\n");
1233 softc->flags |= DA_FLAG_PACK_INVALID;
1237 * return all queued I/O with EIO, so that
1238 * the client can retry these I/Os in the
1239 * proper order should it attempt to recover.
1241 while ((q_bp = bufq_first(&softc->buf_queue))
1243 bufq_remove(&softc->buf_queue, q_bp);
1244 q_bp->b_resid = q_bp->b_bcount;
1245 q_bp->b_error = EIO;
1246 q_bp->b_flags |= B_ERROR;
1250 bp->b_error = error;
1251 bp->b_resid = bp->b_bcount;
1252 bp->b_flags |= B_ERROR;
1254 bp->b_resid = csio->resid;
1256 if (bp->b_resid != 0) {
1257 /* Short transfer ??? */
1258 bp->b_flags |= B_ERROR;
/* Release the frozen device queue left by the error. */
1261 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1262 cam_release_devq(done_ccb->ccb_h.path,
1266 /*getcount_only*/0);
/* Success path: a nonzero residual still marks the buf in error. */
1268 bp->b_resid = csio->resid;
1269 if (csio->resid > 0)
1270 bp->b_flags |= B_ERROR;
1274 * Block out any asynchronous callbacks
1275 * while we touch the pending ccb list.
1278 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
1281 devstat_end_transaction(&softc->device_stats,
1282 bp->b_bcount - bp->b_resid,
1283 done_ccb->csio.tag_action & 0xf,
1284 (bp->b_flags & B_READ) ? DEVSTAT_READ
1287 if (softc->device_stats.busy_count == 0)
1288 softc->flags |= DA_FLAG_WENT_IDLE;
1295 struct scsi_read_capacity_data *rdcap;
1296 char announce_buf[80];
1298 rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;
1300 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1301 struct disk_params *dp;
1303 dasetgeom(periph, rdcap);
1304 dp = &softc->params;
1305 snprintf(announce_buf, sizeof(announce_buf),
1306 "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
1307 (unsigned long) (((u_int64_t)dp->secsize *
1308 dp->sectors) / (1024*1024)), dp->sectors,
1309 dp->secsize, dp->heads, dp->secs_per_track,
1314 announce_buf[0] = '\0';
1317 * Retry any UNIT ATTENTION type errors. They
1318 * are expected at boot.
1320 error = daerror(done_ccb, 0, SF_RETRY_UA |
1321 SF_RETRY_SELTO | SF_NO_PRINT);
1322 if (error == ERESTART) {
1324 * A retry was scheduled, so
1328 } else if (error != 0) {
1329 struct scsi_sense_data *sense;
1331 int sense_key, error_code;
1334 struct ccb_getdev cgd;
1336 /* Don't wedge this device's queue */
1337 cam_release_devq(done_ccb->ccb_h.path,
1341 /*getcount_only*/0);
1343 status = done_ccb->ccb_h.status;
1345 xpt_setup_ccb(&cgd.ccb_h,
1346 done_ccb->ccb_h.path,
1348 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1349 xpt_action((union ccb *)&cgd);
/* Sense data is only usable if autosense ran and it isn't a pointer/physical. */
1351 if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
1352 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
1353 || ((status & CAM_AUTOSNS_VALID) == 0))
1359 sense = &csio->sense_data;
1360 scsi_extract_sense(sense, &error_code,
1365 * Attach to anything that claims to be a
1366 * direct access or optical disk device,
1367 * as long as it doesn't return a "Logical
1368 * unit not supported" (0x25) error.
1370 if ((have_sense) && (asc != 0x25)
1371 && (error_code == SSD_CURRENT_ERROR))
1372 snprintf(announce_buf,
1373 sizeof(announce_buf),
1374 "Attempt to query device "
1375 "size failed: %s, %s",
1376 scsi_sense_key_text[sense_key],
1377 scsi_sense_desc(asc,ascq,
1384 xpt_print_path(periph->path);
1385 printf("got CAM status %#x\n",
1386 done_ccb->ccb_h.status);
1389 xpt_print_path(periph->path);
1390 printf("fatal error, failed"
1391 " to attach to device\n");
1394 * Free up resources.
1396 cam_periph_invalidate(periph);
1400 free(rdcap, M_TEMP);
1401 if (announce_buf[0] != '\0')
1402 xpt_announce_periph(periph, announce_buf);
1403 softc->state = DA_STATE_NORMAL;
1405 * Since our peripheral may be invalidated by an error
1406 * above or an external event, we must release our CCB
1407 * before releasing the probe lock on the peripheral.
1408 * The peripheral will only go away once the last lock
1409 * is removed, and we need it around for the CCB release
1412 xpt_release_ccb(done_ccb);
1413 cam_periph_unlock(periph);
1416 case DA_CCB_WAITING:
1418 /* Caller will release the CCB */
1419 wakeup(&done_ccb->ccb_h.cbfcnp);
1423 /* No-op. We're polling */
1428 xpt_release_ccb(done_ccb);
/*
 * daerror -- per-driver error callback used by cam_periph_runccb().
 * Forces SF_RETRY_UA (treat UNIT ATTENTION as retryable, not fatal)
 * before deferring to the generic cam_periph_error() recovery.
 */
1432 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1434 struct da_softc *softc;
1435 struct cam_periph *periph;
1437 periph = xpt_path_periph(ccb->ccb_h.path);
1438 softc = (struct da_softc *)periph->softc;
1442 * Until we have a better way of doing pack validation,
1443 * don't treat UAs as errors.
1445 sense_flags |= SF_RETRY_UA;
1446 return(cam_periph_error(ccb, cam_flags, sense_flags,
1447 &softc->saved_ccb));
/*
 * daprevent -- issue SCSI PREVENT/ALLOW MEDIUM REMOVAL and keep the
 * DA_FLAG_PACK_LOCKED flag in sync.  A request that matches the current
 * lock state is skipped.  `action' is PR_PREVENT or PR_ALLOW.
 */
1451 daprevent(struct cam_periph *periph, int action)
1453 struct da_softc *softc;
1457 softc = (struct da_softc *)periph->softc;
/* No-op if we're already in the requested state. */
1459 if (((action == PR_ALLOW)
1460 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
1461 || ((action == PR_PREVENT)
1462 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
1466 ccb = cam_periph_getccb(periph, /*priority*/1);
1468 scsi_prevent(&ccb->csio,
1476 error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
1477 /*sense_flags*/0, &softc->device_stats);
1480 if (action == PR_ALLOW)
1481 softc->flags &= ~DA_FLAG_PACK_LOCKED;
1483 softc->flags |= DA_FLAG_PACK_LOCKED;
1486 xpt_release_ccb(ccb);
/*
 * dasetgeom -- record capacity from a READ CAPACITY reply and ask the
 * controller (XPT_CALC_GEOMETRY) for a bootable CHS geometry, caching
 * everything in softc->params.
 */
1490 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
1492 struct ccb_calc_geometry ccg;
1493 struct da_softc *softc;
1494 struct disk_params *dp;
1496 softc = (struct da_softc *)periph->softc;
1498 dp = &softc->params;
1499 dp->secsize = scsi_4btoul(rdcap->length);
/* READ CAPACITY returns the last LBA, hence the +1 for sector count. */
1500 dp->sectors = scsi_4btoul(rdcap->addr) + 1;
1502 * Have the controller provide us with a geometry
1503 * for this disk. The only time the geometry
1504 * matters is when we boot and the controller
1505 * is the only one knowledgeable enough to come
1506 * up with something that will make this a bootable
1509 xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
1510 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
1511 ccg.block_size = dp->secsize;
1512 ccg.volume_size = dp->sectors;
1514 ccg.secs_per_track = 0;
1516 xpt_action((union ccb*)&ccg);
1517 dp->heads = ccg.heads;
1518 dp->secs_per_track = ccg.secs_per_track;
1519 dp->cylinders = ccg.cylinders;
/*
 * dasendorderedtag -- periodic timeout (armed in dainit) that walks all
 * units and flags DA_FLAG_NEED_OTAG on any busy unit that has gone a
 * whole interval without an ordered tag, preventing simple-tag
 * starvation (see DA_ORDEREDTAG_INTERVAL comment above).
 */
1523 dasendorderedtag(void *arg)
1525 struct da_softc *softc;
1528 for (softc = SLIST_FIRST(&softc_list);
1530 softc = SLIST_NEXT(softc, links)) {
1532 if ((softc->ordered_tag_count == 0)
1533 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
1534 softc->flags |= DA_FLAG_NEED_OTAG;
1536 if (softc->device_stats.busy_count > 0)
1537 softc->flags &= ~DA_FLAG_WENT_IDLE;
1539 softc->ordered_tag_count = 0;
1542 /* Queue us up again */
1543 timeout(dasendorderedtag, NULL,
1544 (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
1548 * Step through all DA peripheral drivers, and if the device is still open,
1549 * sync the disk cache to physical media.
/* Registered as a shutdown_post_sync event handler in dainit(). */
1552 dashutdown(void * arg, int howto)
1554 struct cam_periph *periph;
1555 struct da_softc *softc;
1557 for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
1558 periph = TAILQ_NEXT(periph, unit_links)) {
1560 softc = (struct da_softc *)periph->softc;
1563 * We only sync the cache if the drive is still open, and
1564 * if the drive is capable of it..
1566 if (((softc->flags & DA_FLAG_OPEN) == 0)
1567 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
/* Polled action: interrupts may already be disabled at shutdown. */
1570 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
1572 ccb.ccb_h.ccb_state = DA_CCB_DUMP;
1573 scsi_synchronize_cache(&ccb.csio,
1577 /*begin_lba*/0, /* whole disk */
1582 xpt_polled_action(&ccb);
1584 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1585 if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
1586 CAM_SCSI_STATUS_ERROR)
1587 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
1588 int error_code, sense_key, asc, ascq;
1590 scsi_extract_sense(&ccb.csio.sense_data,
1591 &error_code, &sense_key,
1594 if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
1595 scsi_sense_print(&ccb.csio);
1597 xpt_print_path(periph->path);
1598 printf("Synchronize cache failed, status "
1599 "== 0x%x, scsi status == 0x%x\n",
1600 ccb.ccb_h.status, ccb.csio.scsi_status);
/* Don't leave the device queue frozen at shutdown. */
1604 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
1605 cam_release_devq(ccb.ccb_h.path,
1609 /*getcount_only*/0);