2 * Common functions for CAM "type" (peripheral) drivers.
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
38 #include <sys/linker_set.h>
41 #include <sys/mutex.h>
44 #include <sys/devicestat.h>
47 #include <vm/vm_extern.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_xpt_periph.h>
52 #include <cam/cam_periph.h>
53 #include <cam/cam_debug.h>
55 #include <cam/scsi/scsi_all.h>
56 #include <cam/scsi/scsi_message.h>
57 #include <cam/scsi/scsi_pass.h>
59 static u_int camperiphnextunit(struct periph_driver *p_drv,
60 u_int newunit, int wired,
61 path_id_t pathid, target_id_t target,
63 static u_int camperiphunit(struct periph_driver *p_drv,
64 path_id_t pathid, target_id_t target,
66 static void camperiphdone(struct cam_periph *periph,
68 static void camperiphfree(struct cam_periph *periph);
69 static int camperiphscsistatuserror(union ccb *ccb,
71 u_int32_t sense_flags,
74 u_int32_t *relsim_flags,
76 static int camperiphscsisenseerror(union ccb *ccb,
78 u_int32_t sense_flags,
81 u_int32_t *relsim_flags,
/* Number of peripheral drivers currently registered in periph_drivers. */
84 static int nperiph_drivers;
/* NULL-terminated table of all registered peripheral drivers. */
85 struct periph_driver **periph_drivers;
87 MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
/*
 * Register a peripheral driver: allocate a table one entry larger,
 * copy the old entries over, append "data", and keep the table
 * NULL-terminated.  NOTE(review): several lines (return type, locals,
 * release of the old table) are not visible in this view.
 */
90 periphdriver_register(void *data)
92 struct periph_driver **newdrivers, **old;
/* +2: one slot for the new driver, one for the NULL terminator. */
95 ndrivers = nperiph_drivers + 2;
96 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_TEMP, M_WAITOK);
98 bcopy(periph_drivers, newdrivers,
99 sizeof(*newdrivers) * nperiph_drivers);
100 newdrivers[nperiph_drivers] = (struct periph_driver *)data;
101 newdrivers[nperiph_drivers + 1] = NULL;
/* Swap in the new table; "old" is presumably freed below — elided. */
102 old = periph_drivers;
103 periph_drivers = newdrivers;
/*
 * Allocate and initialize a new peripheral instance for the given path.
 * Registers the constructor/destructor/start/oninvalidate callbacks,
 * assigns a unit number, links the periph into its driver's unit list
 * (sorted by unit number), and finally runs the driver constructor.
 * Returns a CAM status code; on failure the partially-built state is
 * unwound via the init_level switch at the bottom.
 */
110 cam_periph_alloc(periph_ctor_t *periph_ctor,
111 periph_oninv_t *periph_oninvalidate,
112 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
113 char *name, cam_periph_type type, struct cam_path *path,
114 ac_callback_t *ac_callback, ac_code code, void *arg)
116 struct periph_driver **p_drv;
117 struct cam_periph *periph;
118 struct cam_periph *cur_periph;
120 target_id_t target_id;
128 * Handle Hot-Plug scenarios. If there is already a peripheral
129 * of our type assigned to this path, we are likely waiting for
130 * final close on an old, invalidated, peripheral. If this is
131 * the case, queue up a deferred call to the peripheral's async
132 * handler. If it looks like a mistaken re-allocation, complain.
134 if ((periph = cam_periph_find(path, name)) != NULL) {
136 if ((periph->flags & CAM_PERIPH_INVALID) != 0
137 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
/* Old instance still draining: defer the async callback until free. */
138 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
139 periph->deferred_callback = ac_callback;
140 periph->deferred_ac = code;
141 return (CAM_REQ_INPROG);
143 printf("cam_periph_alloc: attempt to re-allocate "
144 "valid device %s%d rejected\n",
145 periph->periph_name, periph->unit_number);
147 return (CAM_REQ_INVALID);
150 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
154 return (CAM_RESRC_UNAVAIL);
/* Locate the periph_driver entry matching "name" in the driver table. */
158 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
159 if (strcmp((*p_drv)->driver_name, name) == 0)
163 path_id = xpt_path_path_id(path);
164 target_id = xpt_path_target_id(path);
165 lun_id = xpt_path_lun_id(path);
166 bzero(periph, sizeof(*periph));
167 cam_init_pinfo(&periph->pinfo);
168 periph->periph_start = periph_start;
169 periph->periph_dtor = periph_dtor;
170 periph->periph_oninval = periph_oninvalidate;
172 periph->periph_name = name;
/* Pick a unit number, honoring any wired-down hints for this b/t/l. */
173 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
174 periph->immediate_priority = CAM_PRIORITY_NONE;
175 periph->refcount = 0;
176 SLIST_INIT(&periph->ccb_list);
177 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
178 if (status != CAM_REQ_CMP)
184 status = xpt_add_periph(periph);
186 if (status != CAM_REQ_CMP)
/* Insert into the driver's unit list, keeping it sorted by unit number. */
190 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
191 while (cur_periph != NULL
192 && cur_periph->unit_number < periph->unit_number)
193 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
195 if (cur_periph != NULL)
196 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
198 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
199 (*p_drv)->generation++;
206 status = periph_ctor(periph, arg);
208 if (status == CAM_REQ_CMP)
/*
 * Failure unwind: each case tears down one init step, falling through
 * in reverse order of construction (case labels elided in this view).
 */
212 switch (init_level) {
214 /* Initialized successfully */
218 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
220 xpt_remove_periph(periph);
223 xpt_free_path(periph->path);
226 free(periph, M_CAMPERIPH);
229 /* No cleanup to perform. */
/* TODO(review): "Unkown" is misspelled in this panic string. */
232 panic("cam_periph_alloc: Unkown init level");
238 * Find a peripheral structure with the specified path, target, lun,
239 * and (optionally) type. If the name is NULL, this function will return
240 * the first peripheral driver that matches the specified path.
243 cam_periph_find(struct cam_path *path, char *name)
245 struct periph_driver **p_drv;
246 struct cam_periph *periph;
/* Walk every registered driver; skip drivers whose name doesn't match. */
249 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
251 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
/* Compare each unit's path against the requested path. */
255 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
256 if (xpt_path_comp(periph->path, path) == 0) {
/*
 * Take a reference on a peripheral.  Body is mostly elided in this
 * view; the visible path returns CAM_REQ_CMP_ERR — presumably when
 * "periph" is NULL (TODO confirm against full source).
 */
269 cam_periph_acquire(struct cam_periph *periph)
274 return(CAM_REQ_CMP_ERR);
/*
 * Drop a reference on a peripheral.  When the last reference is
 * released and the periph has been invalidated, free it now.
 */
284 cam_periph_release(struct cam_periph *periph)
292 if ((--periph->refcount == 0)
293 && (periph->flags & CAM_PERIPH_INVALID)) {
294 camperiphfree(periph);
301 * Look for the next unit number that is not currently in use for this
302 * peripheral type starting at "newunit". Also exclude unit numbers that
303 * are reserved for future "hardwiring" unless we already know that this
304 * is a potential wired device. Only assume that the device is "wired" the
305 * first time through the loop since after that we'll be looking at unit
306 * numbers that did not match a wiring entry.
309 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
310 path_id_t pathid, target_id_t target, lun_id_t lun)
312 struct cam_periph *periph;
315 int i, val, dunit, r;
316 const char *dname, *strval;
319 periph_name = p_drv->driver_name;
/* Scan existing units for a collision with the candidate "newunit". */
322 for (periph = TAILQ_FIRST(&p_drv->units);
323 periph != NULL && periph->unit_number != newunit;
324 periph = TAILQ_NEXT(periph, unit_links))
/* A wired entry colliding with an existing unit is a config error. */
327 if (periph != NULL && periph->unit_number == newunit) {
329 xpt_print_path(periph->path);
330 printf("Duplicate Wired Device entry!\n");
331 xpt_print_path(periph->path);
332 printf("Second device (%s device at scbus%d "
333 "target %d lun %d) will not be wired\n",
334 periph_name, pathid, target, lun);
343 * Don't match entries like "da 4" as a wired down
344 * device, but do match entries like "da 4 target 5"
345 * or even "da 4 scbus 1".
350 r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
353 /* if no "target" and no specific scbus, skip */
354 if (resource_int_value(dname, dunit, "target", &val) &&
355 (resource_string_value(dname, dunit, "at",&strval)||
356 strcmp(strval, "scbus") == 0))
358 if (newunit == dunit)
/*
 * Choose a unit number for a new peripheral on the given bus/target/lun.
 * First check the kernel-hints resource table for an entry wiring this
 * driver to this exact location; otherwise fall back to the next free
 * unit via camperiphnextunit().
 */
369 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
370 target_id_t target, lun_id_t lun)
373 int wired, i, val, dunit;
374 const char *dname, *strval;
375 char pathbuf[32], *periph_name;
377 periph_name = p_drv->driver_name;
/* "scbus%d" string used to match the hint's "at" location. */
378 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
/* Walk all hints for this driver looking for a bus/target/lun match. */
382 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
384 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
385 if (strcmp(strval, pathbuf) != 0)
389 if (resource_int_value(dname, dunit, "target", &val) == 0) {
394 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
406 * Either start from 0 looking for the next unit or from
407 * the unit number given in the resource config. This way,
408 * if we have wildcard matches, we don't return the same
411 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
/*
 * Mark a peripheral as going away.  Runs the driver's oninvalidate
 * callback exactly once (only on the first invalidation), then frees
 * the periph immediately if nobody holds a reference.
 */
417 cam_periph_invalidate(struct cam_periph *periph)
423 * We only call this routine the first time a peripheral is
424 * invalidated. The oninvalidate() routine is always called at
427 if (((periph->flags & CAM_PERIPH_INVALID) == 0)
428 && (periph->periph_oninval != NULL))
429 periph->periph_oninval(periph);
431 periph->flags |= CAM_PERIPH_INVALID;
432 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
/* No outstanding references: tear down now; negative refcount is a bug. */
434 if (periph->refcount == 0)
435 camperiphfree(periph);
436 else if (periph->refcount < 0)
437 printf("cam_invalidate_periph: refcount < 0!!\n");
/*
 * Final teardown of a peripheral: run the driver destructor, unlink it
 * from its driver's unit list and from the transport layer, deliver any
 * deferred async callback queued by cam_periph_alloc() during hot-plug,
 * then free the path and the periph itself.
 */
442 camperiphfree(struct cam_periph *periph)
445 struct periph_driver **p_drv;
/* Re-locate the owning driver entry by name. */
447 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
448 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
451 if (*p_drv == NULL) {
/* TODO(review): "non-existant" misspelled in this message string. */
452 printf("camperiphfree: attempt to free non-existant periph\n");
456 if (periph->periph_dtor != NULL)
457 periph->periph_dtor(periph);
460 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
461 (*p_drv)->generation++;
464 xpt_remove_periph(periph);
/* Deliver the async event deferred while the old instance drained. */
466 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
470 switch (periph->deferred_ac) {
471 case AC_FOUND_DEVICE:
472 ccb.ccb_h.func_code = XPT_GDEV_TYPE;
473 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
477 case AC_PATH_REGISTERED:
478 ccb.ccb_h.func_code = XPT_PATH_INQ;
479 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
487 periph->deferred_callback(NULL, periph->deferred_ac,
490 xpt_free_path(periph->path);
491 free(periph, M_CAMPERIPH);
495 * Wait interruptibly for an exclusive lock.
498 cam_periph_lock(struct cam_periph *periph, int priority)
503 * Increment the reference count on the peripheral
504 * while we wait for our lock attempt to succeed
505 * to ensure the peripheral doesn't disappear out
506 * from under us while we sleep.
508 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
/* Sleep until the current holder drops CAM_PERIPH_LOCKED. */
511 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
512 periph->flags |= CAM_PERIPH_LOCK_WANTED;
/* Interrupted sleep: drop our reference and bail with the error. */
513 if ((error = tsleep(periph, priority, "caplck", 0)) != 0) {
514 cam_periph_release(periph);
519 periph->flags |= CAM_PERIPH_LOCKED;
524 * Unlock and wake up any waiters.
527 cam_periph_unlock(struct cam_periph *periph)
529 periph->flags &= ~CAM_PERIPH_LOCKED;
/* Wake sleepers in cam_periph_lock() if anyone is waiting. */
530 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
531 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
/* Drop the reference taken by cam_periph_lock(). */
535 cam_periph_release(periph);
539 * Map user virtual pointers into kernel virtual address space, so we can
540 * access the memory. This won't work on physical pointers, for now it's
541 * up to the caller to check for that. (XXX KDM -- should we do that here
542 * instead?) This also only works for up to MAXPHYS memory. Since we use
543 * buffers to map stuff in and out, we're limited to the buffer size.
546 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
549 int flags[CAM_PERIPH_MAXMAPS];
550 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
551 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
552 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
/* Collect pointer/length/direction per buffer based on the CCB type. */
554 switch(ccb->ccb_h.func_code) {
556 if (ccb->cdm.match_buf_len == 0) {
557 printf("cam_periph_mapmem: invalid match buffer "
/* Device-match CCBs: optional pattern buffer (out) + match buffer (in). */
561 if (ccb->cdm.pattern_buf_len > 0) {
562 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
563 lengths[0] = ccb->cdm.pattern_buf_len;
564 dirs[0] = CAM_DIR_OUT;
565 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
566 lengths[1] = ccb->cdm.match_buf_len;
567 dirs[1] = CAM_DIR_IN;
570 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
571 lengths[0] = ccb->cdm.match_buf_len;
572 dirs[0] = CAM_DIR_IN;
577 case XPT_CONT_TARGET_IO:
578 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
581 data_ptrs[0] = &ccb->csio.data_ptr;
582 lengths[0] = ccb->csio.dxfer_len;
583 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
588 break; /* NOTREACHED */
592 * Check the transfer length and permissions first, so we don't
593 * have to unmap any previously mapped buffers.
595 for (i = 0; i < numbufs; i++) {
600 * The userland data pointer passed in may not be page
601 * aligned. vmapbuf() truncates the address to a page
602 * boundary, so if the address isn't page aligned, we'll
603 * need enough space for the given transfer length, plus
604 * whatever extra space is necessary to make it to the page
608 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
609 printf("cam_periph_mapmem: attempt to map %lu bytes, "
610 "which is greater than DFLTPHYS(%d)\n",
612 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
/* Translate CAM direction flags into bio commands for vmapbuf. */
617 if (dirs[i] & CAM_DIR_OUT) {
618 flags[i] = BIO_WRITE;
621 if (dirs[i] & CAM_DIR_IN) {
627 /* this keeps the current process from getting swapped */
629 * XXX KDM should I use P_NOSWAP instead?
633 for (i = 0; i < numbufs; i++) {
637 mapinfo->bp[i] = getpbuf(NULL);
639 /* save the buffer's data address */
640 mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;
642 /* put our pointer in the data slot */
643 mapinfo->bp[i]->b_data = *data_ptrs[i];
645 /* set the transfer length, we know it's < DFLTPHYS */
646 mapinfo->bp[i]->b_bufsize = lengths[i];
648 /* set the direction */
649 mapinfo->bp[i]->b_iocmd = flags[i];
652 * Map the buffer into kernel memory.
654 * Note that useracc() alone is not a sufficient test.
655 * vmapbuf() can still fail due to a smaller file mapped
656 * into a larger area of VM, or if userland races against
657 * vmapbuf() after the useracc() check.
659 if (vmapbuf(mapinfo->bp[i]) < 0) {
/* Failure: unwind every buffer mapped so far, then this one. */
660 for (j = 0; j < i; ++j) {
661 *data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
662 vunmapbuf(mapinfo->bp[j]);
663 relpbuf(mapinfo->bp[j], NULL);
665 relpbuf(mapinfo->bp[i], NULL);
670 /* set our pointer to the new mapped area */
671 *data_ptrs[i] = mapinfo->bp[i]->b_data;
673 mapinfo->num_bufs_used++;
680 * Unmap memory segments mapped into kernel virtual address space by
681 * cam_periph_mapmem().
684 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
687 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
/* Nothing was mapped: nothing to undo. */
689 if (mapinfo->num_bufs_used <= 0) {
690 /* allow ourselves to be swapped once again */
/* Recompute which CCB fields were remapped, mirroring mapmem's switch. */
695 switch (ccb->ccb_h.func_code) {
697 numbufs = min(mapinfo->num_bufs_used, 2);
700 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
702 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
703 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
707 case XPT_CONT_TARGET_IO:
708 data_ptrs[0] = &ccb->csio.data_ptr;
709 numbufs = min(mapinfo->num_bufs_used, 1);
712 /* allow ourselves to be swapped once again */
715 break; /* NOTREACHED */
718 for (i = 0; i < numbufs; i++) {
719 /* Set the user's pointer back to the original value */
720 *data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;
722 /* unmap the buffer */
723 vunmapbuf(mapinfo->bp[i]);
725 /* release the buffer */
726 relpbuf(mapinfo->bp[i], NULL);
729 /* allow ourselves to be swapped once again */
/*
 * Obtain a CCB for this peripheral at the given priority.  Schedules
 * the periph via xpt_schedule() and sleeps on the ccb_list until a CCB
 * of matching priority arrives, then dequeues and returns it.
 */
734 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
736 struct ccb_hdr *ccb_h;
739 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));
743 while (SLIST_FIRST(&periph->ccb_list) == NULL) {
/* Raise our request priority if it is better than the pending one. */
744 if (periph->immediate_priority > priority)
745 periph->immediate_priority = priority;
746 xpt_schedule(periph, priority);
/* xpt_schedule() may have delivered a CCB synchronously. */
747 if ((SLIST_FIRST(&periph->ccb_list) != NULL)
748 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
750 tsleep(&periph->ccb_list, PRIBIO, "cgticb", 0);
753 ccb_h = SLIST_FIRST(&periph->ccb_list);
754 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
756 return ((union ccb *)ccb_h);
/*
 * Sleep until the given CCB has been dequeued and completed
 * (status no longer CAM_REQ_INPROG).
 */
760 cam_periph_ccbwait(union ccb *ccb)
765 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
766 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
767 tsleep(&ccb->ccb_h.cbfcnp, PRIBIO, "cbwait", 0);
/*
 * Common ioctl handling for peripheral drivers.  The visible portion
 * services a device-list request: it walks XPT_GDEVLIST results until
 * a passthrough ("pass") peripheral is found on this device, copies the
 * result CCB back to userland, and releases the CCB.
 */
773 cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
774 int (*error_routine)(union ccb *ccb,
776 u_int32_t sense_flags))
786 ccb = cam_periph_getccb(periph, /* priority */ 1);
787 xpt_setup_ccb(&ccb->ccb_h,
790 ccb->ccb_h.func_code = XPT_GDEVLIST;
793 * Basically, the point of this is that we go through
794 * getting the list of devices, until we find a passthrough
795 * device. In the current version of the CAM code, the
796 * only way to determine what type of device we're dealing
797 * with is by its name.
801 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
802 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
804 /* we want the next device in the list */
806 if (strncmp(ccb->cgdl.periph_name,
/* Reached the end without a match: report "no device" to the caller. */
812 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
814 ccb->cgdl.periph_name[0] = '\0';
815 ccb->cgdl.unit_number = 0;
820 /* copy the result back out */
821 bcopy(ccb, addr, sizeof(union ccb));
823 /* and release the ccb */
824 xpt_release_ccb(ccb);
/*
 * Submit a CCB and wait synchronously for its completion, invoking the
 * supplied error routine and retrying for as long as it returns ERESTART.
 * Optionally records devstat start/end transactions for XPT_SCSI_IO CCBs,
 * and releases the device queue if the request left it frozen.
 */
835 cam_periph_runccb(union ccb *ccb,
836 int (*error_routine)(union ccb *ccb,
838 u_int32_t sense_flags),
839 cam_flags camflags, u_int32_t sense_flags,
847 * If the user has supplied a stats structure, and if we understand
848 * this particular type of ccb, record the transaction start.
850 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
851 devstat_start_transaction(ds, NULL);
/* Issue and wait; loop while the error routine asks for a retry. */
856 cam_periph_ccbwait(ccb);
857 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
859 else if (error_routine != NULL)
860 error = (*error_routine)(ccb, camflags, sense_flags);
864 } while (error == ERESTART);
/* Thaw the device queue if this CCB froze it. */
866 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
867 cam_release_devq(ccb->ccb_h.path,
871 /* getcount_only */ FALSE);
873 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
874 devstat_end_transaction(ds,
876 ccb->csio.tag_action & 0xf,
877 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
878 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
879 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
881 DEVSTAT_READ, NULL, NULL);
/*
 * Freeze the device queue for "path" by issuing an XPT_NOOP CCB with
 * the CAM_DEV_QFREEZE flag set.
 */
887 cam_freeze_devq(struct cam_path *path)
889 struct ccb_hdr ccb_h;
891 xpt_setup_ccb(&ccb_h, path, /*priority*/1);
892 ccb_h.func_code = XPT_NOOP;
893 ccb_h.flags = CAM_DEV_QFREEZE;
894 xpt_action((union ccb *)&ccb_h);
/*
 * Release (thaw) the device queue for "path" via an XPT_REL_SIMQ CCB,
 * passing through the caller's relsim flags, opening count, and release
 * timeout.  When getcount_only is set, CAM_DEV_QFREEZE keeps the queue
 * frozen so only the current freeze count is reported.  Returns the
 * remaining queue-frozen count.
 */
898 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
899 u_int32_t openings, u_int32_t timeout,
902 struct ccb_relsim crs;
904 xpt_setup_ccb(&crs.ccb_h, path,
906 crs.ccb_h.func_code = XPT_REL_SIMQ;
907 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
908 crs.release_flags = relsim_flags;
909 crs.openings = openings;
910 crs.release_timeout = timeout;
911 xpt_action((union ccb *)&crs);
912 return (crs.qfrozen_cnt);
/* The original CCB is stashed in the peripheral-private pointer slot. */
915 #define saved_ccb_ptr ppriv_ptr0
/*
 * Completion handler for error-recovery CCBs issued by
 * camperiphscsisenseerror() (START UNIT, TUR, REQUEST SENSE).  On
 * success the saved original CCB is restored and re-queued; on failure
 * the recovery command may be retried, with the device queue released
 * once recovery concludes.  Many case labels are elided in this view.
 */
917 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
919 union ccb *saved_ccb;
923 struct scsi_start_stop_unit *scsi_cmd;
924 u_int32_t relsim_flags, timeout;
925 u_int32_t qfrozen_cnt;
928 xpt_done_ccb = FALSE;
929 status = done_ccb->ccb_h.status;
930 frozen = (status & CAM_DEV_QFRZN) != 0;
931 sense = (status & CAM_AUTOSNS_VALID) != 0;
932 status &= CAM_STATUS_MASK;
936 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
939 * Unfreeze the queue once if it is already frozen..
942 qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
953 * If we have successfully taken a device from the not
954 * ready to ready state, re-scan the device and re-get
955 * the inquiry information. Many devices (mostly disks)
956 * don't properly report their inquiry information unless
959 * If we manually retrieved sense into a CCB and got
960 * something other than "NO SENSE" send the updated CCB
961 * back to the client via xpt_done() to be processed via
962 * the error recovery code again.
964 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
965 scsi_cmd = (struct scsi_start_stop_unit *)
966 &done_ccb->csio.cdb_io.cdb_bytes;
/* Successful START/STOP: the device's inquiry data may have changed. */
968 if (scsi_cmd->opcode == START_STOP_UNIT)
969 xpt_async(AC_INQ_CHANGED,
970 done_ccb->ccb_h.path, NULL);
971 if (scsi_cmd->opcode == REQUEST_SENSE) {
974 sense_key = saved_ccb->csio.sense_data.flags;
975 sense_key &= SSD_KEY;
/* Real sense data recovered: mark the saved CCB and report it. */
976 if (sense_key != SSD_KEY_NO_SENSE) {
977 saved_ccb->ccb_h.status |=
980 xpt_print_path(saved_ccb->ccb_h.path);
981 printf("Recovered Sense\n");
982 scsi_sense_print(&saved_ccb->csio);
983 cam_error_print(saved_ccb, CAM_ESF_ALL,
/* Restore the original CCB over the recovery CCB and re-issue it. */
990 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
993 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
995 if (xpt_done_ccb == FALSE)
996 xpt_action(done_ccb);
1000 case CAM_SCSI_STATUS_ERROR:
1001 scsi_cmd = (struct scsi_start_stop_unit *)
1002 &done_ccb->csio.cdb_io.cdb_bytes;
1004 struct ccb_getdev cgd;
1005 struct scsi_sense_data *sense;
1006 int error_code, sense_key, asc, ascq;
1007 scsi_sense_action err_action;
1009 sense = &done_ccb->csio.sense_data;
1010 scsi_extract_sense(sense, &error_code,
1011 &sense_key, &asc, &ascq);
1014 * Grab the inquiry data for this device.
1016 xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
1018 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1019 xpt_action((union ccb *)&cgd);
1020 err_action = scsi_error_action(&done_ccb->csio,
1024 * If the error is "invalid field in CDB",
1025 * and the load/eject flag is set, turn the
1026 * flag off and try again. This is just in
1027 * case the drive in question barfs on the
1028 * load eject flag. The CAM code should set
1029 * the load/eject flag by default for
1034 * Should we check to see what the specific
1035 * scsi status is?? Or does it not matter
1036 * since we already know that there was an
1037 * error, and we know what the specific
1038 * error code was, and we know what the
/* ASC/ASCQ 0x24/0x00 = INVALID FIELD IN CDB: retry without LOEJ. */
1041 if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1042 ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1043 (asc == 0x24) && (ascq == 0x00) &&
1044 (done_ccb->ccb_h.retry_count > 0)) {
1046 scsi_cmd->how &= ~SSS_LOEJ;
1048 xpt_action(done_ccb);
1050 } else if ((done_ccb->ccb_h.retry_count > 1)
1051 && ((err_action & SS_MASK) != SS_FAIL)) {
1054 * In this case, the error recovery
1055 * command failed, but we've got
1056 * some retries left on it. Give
1057 * it another try unless this is an
1058 * unretryable error.
1061 /* set the timeout to .5 sec */
1063 RELSIM_RELEASE_AFTER_TIMEOUT;
1066 xpt_action(done_ccb);
1072 * Perform the final retry with the original
1073 * CCB so that final error processing is
1074 * performed by the owner of the CCB.
1076 bcopy(done_ccb->ccb_h.saved_ccb_ptr,
1077 done_ccb, sizeof(union ccb));
1079 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1081 xpt_action(done_ccb);
1085 * Eh?? The command failed, but we don't
1086 * have any sense. What's up with that?
1087 * Fire the CCB again to return it to the
1090 bcopy(done_ccb->ccb_h.saved_ccb_ptr,
1091 done_ccb, sizeof(union ccb));
1093 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1095 xpt_action(done_ccb);
/* Any other status: restore the original CCB and let the owner decide. */
1100 bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
1103 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1105 xpt_action(done_ccb);
1110 /* decrement the retry count */
1112 * XXX This isn't appropriate in all cases. Restructure,
1113 * so that the retry count is only decremented on an
1114 * actual retry. Remember that the original ccb had its
1115 * retry count dropped before entering recovery, so
1116 * doing it again is a bug.
1118 if (done_ccb->ccb_h.retry_count > 0)
1119 done_ccb->ccb_h.retry_count--;
/* Release the queue with whatever relsim policy recovery selected. */
1121 qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
1122 /*relsim_flags*/relsim_flags,
1125 /*getcount_only*/0);
1126 if (xpt_done_ccb == TRUE)
1127 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
1131 * Generic Async Event handler. Peripheral drivers usually
1132 * filter out the events that require personal attention,
1133 * and leave the rest to this function.
1136 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1137 struct cam_path *path, void *arg)
/* Device gone: invalidate; bus reset (case elided): settle the bus. */
1140 case AC_LOST_DEVICE:
1141 cam_periph_invalidate(periph);
1146 cam_periph_bus_settle(periph, scsi_delay);
/*
 * Freeze this peripheral's queue for "bus_settle" milliseconds measured
 * from the device's last reset, fetched via an XPT_GDEV_STATS CCB.
 */
1155 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1157 struct ccb_getdevstats cgds;
1159 xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
1160 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1161 xpt_action((union ccb *)&cgds);
1162 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
/*
 * If fewer than duration_ms milliseconds have elapsed since event_time,
 * freeze the device queue and schedule its automatic release after the
 * remainder of the interval.
 */
1166 cam_periph_freeze_after_event(struct cam_periph *periph,
1167 struct timeval* event_time, u_int duration_ms)
1169 struct timeval delta;
1170 struct timeval duration_tv;
1176 timevalsub(&delta, event_time);
1177 duration_tv.tv_sec = duration_ms / 1000;
1178 duration_tv.tv_usec = (duration_ms % 1000) * 1000;
/* Still inside the settle window: wait out only the remaining time. */
1179 if (timevalcmp(&delta, &duration_tv, <)) {
1180 timevalsub(&duration_tv, &delta);
1182 duration_ms = duration_tv.tv_sec * 1000;
1183 duration_ms += duration_tv.tv_usec / 1000;
1184 cam_freeze_devq(periph->path);
1185 cam_release_devq(periph->path,
1186 RELSIM_RELEASE_AFTER_TIMEOUT,
1188 /*timeout*/duration_ms,
1189 /*getcount_only*/0);
/*
 * Translate a SCSI status byte into an errno and a queue-release policy.
 * CHECK CONDITION is delegated to camperiphscsisenseerror(); QUEUE FULL
 * throttles the tag count; BUSY retries after a delay.  Writes the
 * chosen policy through *openings and *relsim_flags.
 */
1195 camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
1196 u_int32_t sense_flags, union ccb *save_ccb,
1197 int *openings, u_int32_t *relsim_flags,
1202 switch (ccb->csio.scsi_status) {
1203 case SCSI_STATUS_OK:
1204 case SCSI_STATUS_COND_MET:
1205 case SCSI_STATUS_INTERMED:
1206 case SCSI_STATUS_INTERMED_COND_MET:
1209 case SCSI_STATUS_CMD_TERMINATED:
1210 case SCSI_STATUS_CHECK_COND:
/* Sense data (auto or fetched) drives the real recovery decision. */
1211 error = camperiphscsisenseerror(ccb,
1219 case SCSI_STATUS_QUEUE_FULL:
1222 struct ccb_getdevstats cgds;
1225 * First off, find out what the current
1226 * transaction counts are.
1228 xpt_setup_ccb(&cgds.ccb_h,
1231 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1232 xpt_action((union ccb *)&cgds);
1235 * If we were the only transaction active, treat
1236 * the QUEUE FULL as if it were a BUSY condition.
1238 if (cgds.dev_active != 0) {
1242 * Reduce the number of openings to
1243 * be 1 less than the amount it took
1244 * to get a queue full bounded by the
1245 * minimum allowed tag count for this
1248 total_openings = cgds.dev_active + cgds.dev_openings;
1249 *openings = cgds.dev_active;
1250 if (*openings < cgds.mintags)
1251 *openings = cgds.mintags;
1252 if (*openings < total_openings)
1253 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1256 * Some devices report queue full for
1257 * temporary resource shortages. For
1258 * this reason, we allow a minimum
1259 * tag count to be entered via a
1260 * quirk entry to prevent the queue
1261 * count on these devices from falling
1262 * to a pessimistically low value. We
1263 * still wait for the next successful
1264 * completion, however, before queueing
1265 * more transactions to the device.
1267 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1272 xpt_print_path(ccb->ccb_h.path);
1273 printf("Queue Full\n");
1279 case SCSI_STATUS_BUSY:
1281 * Restart the queue after either another
1282 * command completes or a 1 second timeout.
1285 xpt_print_path(ccb->ccb_h.path);
1286 printf("Device Busy\n");
1288 if (ccb->ccb_h.retry_count > 0) {
1289 ccb->ccb_h.retry_count--;
1291 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1292 | RELSIM_RELEASE_AFTER_CMDCMPLT;
/* Reservation conflicts and unknown statuses fall through to errno. */
1298 case SCSI_STATUS_RESERV_CONFLICT:
1299 xpt_print_path(ccb->ccb_h.path);
1300 printf("Reservation Conflict\n");
1304 xpt_print_path(ccb->ccb_h.path);
1305 printf("SCSI Status 0x%x\n", ccb->csio.scsi_status);
/*
 * Decide on a recovery action for a command that returned sense data
 * (or needs sense fetched).  Consults scsi_error_action(), may consume a
 * retry, and for SS_START/SS_TUR/SS_REQSENSE builds a recovery command
 * in place of the original CCB (saving the original in save_ccb) to be
 * completed by camperiphdone().  Returns the chosen errno.
 */
1313 camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
1314 u_int32_t sense_flags, union ccb *save_ccb,
1315 int *openings, u_int32_t *relsim_flags,
1318 struct cam_periph *periph;
1321 periph = xpt_path_periph(ccb->ccb_h.path);
1322 if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {
1325 * If error recovery is already in progress, don't attempt
1326 * to process this error, but requeue it unconditionally
1327 * and attempt to process it once error recovery has
1328 * completed. This failed command is probably related to
1329 * the error that caused the currently active error recovery
1330 * action so our current recovery efforts should also
1331 * address this command. Be aware that the error recovery
1332 * code assumes that only one recovery action is in progress
1333 * on a particular peripheral instance at any given time
1334 * (e.g. only one saved CCB for error recovery) so it is
1335 * imperative that we don't violate this assumption.
1339 scsi_sense_action err_action;
1340 struct ccb_getdev cgd;
1341 const char *action_string;
1342 union ccb* print_ccb;
1344 /* A description of the error recovery action performed */
1345 action_string = NULL;
1348 * The location of the original ccb
1349 * for sense printing purposes.
1354 * Grab the inquiry data for this device.
1356 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
1357 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1358 xpt_action((union ccb *)&cgd);
/*
 * Pick the action: use autosense when valid, fetch sense ourselves
 * when autosense was allowed but absent, else fail with EIO retry.
 */
1360 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
1361 err_action = scsi_error_action(&ccb->csio,
1364 else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
1365 err_action = SS_REQSENSE;
1367 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1369 error = err_action & SS_ERRMASK;
1372 * If the recovery action will consume a retry,
1373 * make sure we actually have retries available.
1375 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1376 if (ccb->ccb_h.retry_count > 0)
1377 ccb->ccb_h.retry_count--;
1379 action_string = "Retries Exhausted";
1380 goto sense_error_done;
1384 if ((err_action & SS_MASK) >= SS_START) {
1386 * Do common portions of commands that
1387 * use recovery CCBs.
1389 if (save_ccb == NULL) {
1390 action_string = "No recovery CCB supplied";
1391 goto sense_error_done;
/* Preserve the original request; mark recovery active on the periph. */
1393 bcopy(ccb, save_ccb, sizeof(*save_ccb));
1394 print_ccb = save_ccb;
1395 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1398 switch (err_action & SS_MASK) {
1400 action_string = "No Recovery Action Needed";
1404 action_string = "Retrying Command (per Sense Data)";
1408 action_string = "Unretryable error";
1415 * Send a start unit command to the device, and
1416 * then retry the command.
1418 action_string = "Attempting to Start Unit";
1421 * Check for removable media and set
1422 * load/eject flag appropriately.
1424 if (SID_IS_REMOVABLE(&cgd.inq_data))
1429 scsi_start_stop(&ccb->csio,
1443 * Send a Test Unit Ready to the device.
1444 * If the 'many' flag is set, we send 120
1445 * test unit ready commands, one every half
1446 * second. Otherwise, we just send one TUR.
1447 * We only want to do this if the retry
1448 * count has not been exhausted.
1452 if ((err_action & SSQ_MANY) != 0) {
1453 action_string = "Polling device for readiness";
1456 action_string = "Testing device for readiness";
1459 scsi_test_unit_ready(&ccb->csio,
1467 * Accomplish our 500ms delay by deferring
1468 * the release of our device queue appropriately.
1470 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1477 * Send a Request Sense to the device. We
1478 * assume that we are in a contingent allegiance
1479 * condition so we do not tag this request.
1481 scsi_request_sense(&ccb->csio, /*retries*/1,
1483 &save_ccb->csio.sense_data,
1484 sizeof(save_ccb->csio.sense_data),
1485 CAM_TAG_ACTION_NONE,
1486 /*sense_len*/SSD_FULL_SIZE,
1491 panic("Unhandled error action %x", err_action);
1494 if ((err_action & SS_MASK) >= SS_START) {
1496 * Drop the priority to 0 so that the recovery
1497 * CCB is the first to execute. Freeze the queue
1498 * after this command is sent so that we can
1499 * restore the old csio and have it queued in
1500 * the proper order before we release normal
1501 * transactions to the device.
1503 ccb->ccb_h.pinfo.priority = 0;
1504 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1505 ccb->ccb_h.saved_ccb_ptr = save_ccb;
/* Optionally print the sense data and the action we decided on. */
1510 if ((err_action & SSQ_PRINT_SENSE) != 0
1511 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
1512 cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1513 xpt_print_path(ccb->ccb_h.path);
1515 scsi_sense_print(&print_ccb->csio);
1516 printf("%s\n", action_string);
1523 * Generic error handler. Peripheral drivers usually filter
1524 * out the errors that they handle in a unique manner, then
1525 * call this function.
1528 cam_periph_error(union ccb *ccb, cam_flags camflags,
1529 u_int32_t sense_flags, union ccb *save_ccb)
1531 const char *action_string;
1534 int error, printed = 0;
1536 u_int32_t relsim_flags;
1539 action_string = NULL;
1540 status = ccb->ccb_h.status;
1541 frozen = (status & CAM_DEV_QFRZN) != 0;
1542 status &= CAM_STATUS_MASK;
1543 openings = relsim_flags = 0;
1549 case CAM_SCSI_STATUS_ERROR:
1550 error = camperiphscsistatuserror(ccb,
1558 case CAM_AUTOSENSE_FAIL:
1559 xpt_print_path(ccb->ccb_h.path);
1560 printf("AutoSense Failed\n");
1561 error = EIO; /* we have to kill the command */
1563 case CAM_REQ_CMP_ERR:
1564 if (bootverbose && printed == 0) {
1565 xpt_print_path(ccb->ccb_h.path);
1566 printf("Request completed with CAM_REQ_CMP_ERR\n");
1570 case CAM_CMD_TIMEOUT:
1571 if (bootverbose && printed == 0) {
1572 xpt_print_path(ccb->ccb_h.path);
1573 printf("Command timed out\n");
1577 case CAM_UNEXP_BUSFREE:
1578 if (bootverbose && printed == 0) {
1579 xpt_print_path(ccb->ccb_h.path);
1580 printf("Unexpected Bus Free\n");
1584 case CAM_UNCOR_PARITY:
1585 if (bootverbose && printed == 0) {
1586 xpt_print_path(ccb->ccb_h.path);
1587 printf("Uncorrected Parity Error\n");
1591 case CAM_DATA_RUN_ERR:
1592 if (bootverbose && printed == 0) {
1593 xpt_print_path(ccb->ccb_h.path);
1594 printf("Data Overrun\n");
1597 error = EIO; /* we have to kill the command */
1598 /* decrement the number of retries */
1599 if (ccb->ccb_h.retry_count > 0) {
1600 ccb->ccb_h.retry_count--;
1603 action_string = "Retries Exausted";
1609 case CAM_MSG_REJECT_REC:
1610 /* XXX Don't know that these are correct */
1613 case CAM_SEL_TIMEOUT:
1615 struct cam_path *newpath;
1617 if ((camflags & CAM_RETRY_SELTO) != 0) {
1618 if (ccb->ccb_h.retry_count > 0) {
1620 ccb->ccb_h.retry_count--;
1622 if (bootverbose && printed == 0) {
1623 xpt_print_path(ccb->ccb_h.path);
1624 printf("Selection Timeout\n");
1629 * Wait a second to give the device
1630 * time to recover before we try again.
1632 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1638 /* Should we do more if we can't create the path?? */
1639 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
1640 xpt_path_path_id(ccb->ccb_h.path),
1641 xpt_path_target_id(ccb->ccb_h.path),
1642 CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1646 * Let peripheral drivers know that this device has gone
1649 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1650 xpt_free_path(newpath);
1653 case CAM_REQ_INVALID:
1654 case CAM_PATH_INVALID:
1655 case CAM_DEV_NOT_THERE:
1657 case CAM_PROVIDE_FAIL:
1658 case CAM_REQ_TOO_BIG:
1659 case CAM_LUN_INVALID:
1660 case CAM_TID_INVALID:
1663 case CAM_SCSI_BUS_RESET:
1666 * Commands that repeatedly timeout and cause these
1667 * kinds of error recovery actions, should return
1668 * CAM_CMD_TIMEOUT, which allows us to safely assume
1669 * that this command was an innocent bystander to
1670 * these events and should be unconditionally
1673 if (bootverbose && printed == 0) {
1674 xpt_print_path(ccb->ccb_h.path);
1675 if (status == CAM_BDR_SENT)
1676 printf("Bus Device Reset sent\n");
1678 printf("Bus Reset issued\n");
1682 case CAM_REQUEUE_REQ:
1683 /* Unconditional requeue */
1685 if (bootverbose && printed == 0) {
1686 xpt_print_path(ccb->ccb_h.path);
1687 printf("Request Requeued\n");
1691 case CAM_RESRC_UNAVAIL:
1695 /* decrement the number of retries */
1696 if (ccb->ccb_h.retry_count > 0) {
1697 ccb->ccb_h.retry_count--;
1699 if (bootverbose && printed == 0) {
1700 xpt_print_path(ccb->ccb_h.path);
1701 printf("CAM Status 0x%x\n", status);
1706 action_string = "Retries Exhausted";
1711 /* Attempt a retry */
1712 if (error == ERESTART || error == 0) {
1714 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1716 if (error == ERESTART) {
1717 action_string = "Retrying Command";
1722 cam_release_devq(ccb->ccb_h.path,
1726 /*getcount_only*/0);
1730 * If we have an error and are booting verbosely, whine
1731 * *unless* this was a non-retryable selection timeout.
1733 if (error != 0 && bootverbose &&
1734 !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
1737 if (action_string == NULL)
1738 action_string = "Unretryable Error";
1739 if (error != ERESTART) {
1740 xpt_print_path(ccb->ccb_h.path);
1741 printf("error %d\n", error);
1743 xpt_print_path(ccb->ccb_h.path);
1744 printf("%s\n", action_string);