/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun);
static	void		camperiphdone(struct cam_periph *periph,
				      union ccb *done_ccb);
static	void		camperiphfree(struct cam_periph *periph);
static	int		camperiphscsistatuserror(union ccb *ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout,
						 const char **action_string);
static	int		camperiphscsisenseerror(union ccb *ccb,
						cam_flags camflags,
						u_int32_t sense_flags,
						int *openings,
						u_int32_t *relsim_flags,
						u_int32_t *timeout,
						const char **action_string);
static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
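
/*
 * Being loader tunables, these delays can be set from loader.conf(5)
 * before boot; a sketch of the standard tunable syntax:
 *
 *	kern.cam.periph_selto_delay="1500"
 */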

void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			    M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		free(old, M_CAMPERIPH);
	nperiph_drivers++;
	/* If the driver is marked as early, or it is late now, initialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		(*drv->init)();
}

void
periphdriver_init(int level)
{
	int i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct		periph_driver **p_drv;
	struct		cam_sim *sim;
	struct		cam_periph *periph;
	struct		cam_periph *cur_periph;
	path_id_t	path_id;
	target_id_t	target_id;
	lun_id_t	lun_id;
	cam_status	status;
	u_int		init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected flags %#x "
			       "refcount %d\n", periph->periph_name,
			       periph->unit_number, periph->flags,
			       periph->refcount);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		xpt_unlock_buses();
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("%s: Unknown init level", __func__);
	}
	return(status);
}
/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				mtx_assert(periph->sim->mtx, MA_OWNED);
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	cam_status status;

	status = CAM_REQ_CMP_ERR;
	if (periph == NULL)
		return (status);

	xpt_lock_buses();
	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
		periph->refcount++;
		status = CAM_REQ_CMP;
	}
	xpt_unlock_buses();

	return (status);
}

void
cam_periph_release_locked_buses(struct cam_periph *periph)
{
	if (periph->refcount != 0) {
		periph->refcount--;
	} else {
		panic("%s: release of %p when refcount is zero\n ", __func__,
		      periph);
	}
	if (periph->refcount == 0
	    && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	cam_periph_release_locked_buses(periph);
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct cam_sim *sim;

	if (periph == NULL)
		return;

	sim = periph->sim;
	mtx_assert(sim->mtx, MA_NOTOWNED);
	mtx_lock(sim->mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(sim->mtx);
}
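
/*
 * The usual reference-counting pattern around these helpers (a minimal
 * sketch, not taken from any particular driver): grab a reference before
 * using a peripheral that could be invalidated underneath you, and drop
 * it when done so a pending invalidation can complete:
 *
 *	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *		return (ENXIO);
 *	... use periph ...
 *	cam_periph_release(periph);
 */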

int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	mtx_assert(periph->sim->mtx, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			cam_periph_release_locked(periph);
			return (ENXIO);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	mtx_assert(periph->sim->mtx, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}
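
/*
 * cam_periph_hold()/cam_periph_unhold() are typically used to single-thread
 * operations such as open and close.  A hypothetical sketch of a driver's
 * open routine (sim lock already held):
 *
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0)
 *		return (error);
 *	... issue commands that must not interleave with another holder ...
 *	cam_periph_unhold(periph);
 */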

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
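/*
 * Wiring entries come from the hints mechanism parsed below with
 * resource_int_value()/resource_string_value().  For example, hypothetical
 * device.hints(5) lines such as:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * would reserve unit da4 for the device at scbus1 target 5 lun 0, so the
 * search below must skip unit 4 for any non-matching device.
 */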
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name;
	int	i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int	unit;
	int	wired, i, val, dunit;
	const char *dname, *strval;
	char	pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}

void
cam_periph_invalidate(struct cam_periph *periph)
{

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	xpt_unlock_buses();
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers; for now, it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
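/*
 * A typical caller brackets a user-initiated CCB with map/unmap (a sketch
 * of the pattern, not lifted from a specific driver; "fooerror" stands in
 * for the driver's error routine):
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	bzero(&mapinfo, sizeof(mapinfo));
 *	error = cam_periph_mapmem(ccb, &mapinfo);
 *	if (error != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, fooerror, 0, 0, NULL);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 *	return (error);
 */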
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
	/* Some controllers may not be able to handle more data. */
	size_t maxmap = DFLTPHYS;

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       (u_long)maxmap);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < MAXPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return(0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		mtx_assert(periph->sim->mtx, MA_OWNED);
		mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
		    0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
}

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
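
/*
 * A driver's cdevsw ioctl entry point commonly defers unhandled commands
 * here.  Hypothetical "foo" driver sketch (assuming, as pass(4) does, that
 * the periph pointer is stashed in si_drv1):
 *
 *	static int
 *	fooioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
 *	    struct thread *td)
 *	{
 *		struct cam_periph *periph = dev->si_drv1;
 *		int error;
 *
 *		cam_periph_lock(periph);
 *		error = cam_periph_ioctl(periph, cmd, addr, fooerror);
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 */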

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;
	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			devstat_end_transaction(ds,
					ccb->ataio.dxfer_len,
					ccb->ataio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);
		}
	}

	return(error);
}
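
/*
 * Typical synchronous submission from a periph driver (a hedged sketch;
 * "foodone", "fooerror" and "softc" are hypothetical names):
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 4, foodone, MSG_SIMPLE_Q_TAG,
 *			     SSD_FULL_SIZE, 60 * 1000);
 *	error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *				  SF_RETRY_UA, softc->device_stats);
 *	xpt_release_ccb(ccb);
 */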

void
cam_freeze_devq(struct cam_path *path)
{

	cam_freeze_devq_arg(path, 0, 0);
}

void
cam_freeze_devq_arg(struct cam_path *path, uint32_t flags, uint32_t arg)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NONE);
	crs.ccb_h.func_code = XPT_FREEZE_QUEUE;
	crs.release_flags = flags;
	crs.openings = arg;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
#define recovery_depth ppriv_field1
static void
camperiphsensedone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	cam_status	status;
	int		frozen = 0;
	u_int		sense_key;
	int		depth = done_ccb->ccb_h.recovery_depth;

	status = done_ccb->ccb_h.status;
	if (status & CAM_DEV_QFRZN) {
		frozen = 1;
		/*
		 * Clear freeze flag now for case of retry,
		 * freeze will be dropped later.
		 */
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}
	status &= CAM_STATUS_MASK;
	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		sense_key = saved_ccb->csio.sense_data.flags;
		sense_key &= SSD_KEY;
		if (sense_key != SSD_KEY_NO_SENSE) {
			saved_ccb->ccb_h.status |=
			    CAM_AUTOSNS_VALID;
		} else {
			saved_ccb->ccb_h.status &=
			    ~CAM_STATUS_MASK;
			saved_ccb->ccb_h.status |=
			    CAM_AUTOSENSE_FAIL;
		}
		saved_ccb->csio.sense_resid = done_ccb->csio.resid;
		bcopy(saved_ccb, done_ccb, sizeof(union ccb));
		xpt_free_ccb(saved_ccb);
		break;
	}
	default:
		bcopy(saved_ccb, done_ccb, sizeof(union ccb));
		xpt_free_ccb(saved_ccb);
		done_ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		done_ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		break;
	}
	periph->flags &= ~CAM_PERIPH_SENSE_INPROG;
	/*
	 * If it is the end of recovery, drop freeze, taken due to
	 * CAM_DEV_QFREEZE flag, set on recovery request.
	 */
	if (depth == 0) {
		cam_release_devq(done_ccb->ccb_h.path,
			 /*relsim_flags*/0,
			 /*openings*/0,
			 /*timeout*/0,
			 /*getcount_only*/0);
	}
	/*
	 * Copy frozen flag from recovery request if it is set there
	 * for some reason.
	 */
	if (frozen != 0)
		done_ccb->ccb_h.status |= CAM_DEV_QFRZN;
	(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb, *save_ccb;
	cam_status	status;
	int		frozen = 0;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t	relsim_flags, timeout;

	status = done_ccb->ccb_h.status;
	if (status & CAM_DEV_QFRZN) {
		frozen = 1;
		/*
		 * Clear freeze flag now for case of retry,
		 * freeze will be dropped later.
		 */
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	switch (status & CAM_STATUS_MASK) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;

		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED,
				  done_ccb->ccb_h.path, NULL);
		goto final;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (status & CAM_AUTOSNS_VALID) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int    error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);
			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
			    CAM_PRIORITY_NORMAL);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * sense key was?
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			     (asc == 0x24) && (ascq == 0x00) &&
			     (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;
				xpt_action(done_ccb);
			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
					RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);
				break;
			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				goto final;
			}
		} else {
			save_ccb = xpt_alloc_ccb_nowait();
			if (save_ccb == NULL)
				goto final;
			bcopy(done_ccb, save_ccb, sizeof(*save_ccb));
			periph->flags |= CAM_PERIPH_SENSE_INPROG;
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&done_ccb->csio, /*retries*/1,
					   camperiphsensedone,
					   &save_ccb->csio.sense_data,
					   save_ccb->csio.sense_len,
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			done_ccb->ccb_h.pinfo.priority--;
			done_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			done_ccb->ccb_h.saved_ccb_ptr = save_ccb;
			done_ccb->ccb_h.recovery_depth++;
			xpt_action(done_ccb);
		}
		break;
	default:
final:
		bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
		xpt_free_ccb(saved_ccb);
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
		xpt_action(done_ccb);
		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;
	/*
	 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on recovery
	 * request.
	 */
	cam_release_devq(done_ccb->ccb_h.path,
			 /*relsim_flags*/relsim_flags,
			 /*openings*/0,
			 /*timeout*/timeout,
			 /*getcount_only*/0);
	/* Drop freeze taken, if this recovery request got error. */
	if (frozen != 0) {
		cam_release_devq(done_ccb->ccb_h.path,
			 /*relsim_flags*/0,
			 /*openings*/0,
			 /*timeout*/0,
			 /*getcount_only*/0);
	}
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}
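
/*
 * A peripheral driver's async callback usually looks something like this
 * (hypothetical "foo" driver sketch):
 *
 *	static void
 *	fooasync(void *callback_arg, u_int32_t code,
 *	    struct cam_path *path, void *arg)
 *	{
 *		struct cam_periph *periph = callback_arg;
 *
 *		switch (code) {
 *		case AC_FOUND_DEVICE:
 *			... attempt to attach to the new device ...
 *			break;
 *		default:
 *			cam_periph_async(periph, code, path, arg);
 *			break;
 *		}
 *	}
 */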

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}
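
/*
 * For example, an async handler might let the bus settle after a reset
 * by calling the helper above (a sketch; the 100ms figure is an arbitrary
 * illustration):
 *
 *	case AC_BUS_RESET:
 *		cam_periph_bus_settle(periph, 100);
 *		break;
 */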

static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout, const char **action_string)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		if (bootverbose)
			xpt_print(ccb->ccb_h.path, "SCSI status error\n");
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						openings,
						relsim_flags,
						timeout,
						action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI status 0x%x\n",
		    ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags &
	    (CAM_PERIPH_RECOVERY_INPROG | CAM_PERIPH_SENSE_INPROG)) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0)
				ccb->ccb_h.retry_count--;
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
			bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			if (SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
				xpt_free_ccb(orig_ccb);
				ccb->ccb_h.status |= CAM_DEV_QFRZN;
				*action_string = "Will not autostart a "
				    "sequential access device";
				err_action = SS_FAIL;
				error = EIO;
				break;
			}

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
				retries = 1;
			}
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			*action_string = "Requesting SCSI sense data";
			periph->flags |= CAM_PERIPH_SENSE_INPROG;
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphsensedone,
					   &orig_ccb->csio.sense_data,
					   orig_ccb->csio.sense_len,
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority--;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			ccb->ccb_h.recovery_depth = 0;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
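/*
 * In the common case a driver's error routine, as passed to
 * cam_periph_runccb(), is little more than the following (a sketch;
 * NULL stands in for the save_ccb argument, which this implementation
 * does not use):
 *
 *	static int
 *	fooerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		return (cam_periph_error(ccb, cam_flags, sense_flags, NULL));
 *	}
 */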
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	struct cam_periph *periph;
	const char *action_string;
	cam_status  status;
	int         frozen;
	int         error, printed = 0;
	int         openings;
	u_int32_t   relsim_flags;
	u_int32_t   timeout = 0;

	periph = xpt_path_periph(ccb->ccb_h.path);
	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 &openings,
						 &relsim_flags,
						 &timeout,
						 &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_ATA_STATUS_ERROR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "ATA status error\n");
			cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected parity error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data overrun\n");
			printed++;
		}
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0 &&
		    (periph->flags & CAM_PERIPH_INVALID) == 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
			action_string = "Retries exhausted";
		}
		/* FALLTHROUGH */
	case CAM_DEV_NOT_THERE:
	{
		struct cam_path *newpath;
		lun_id_t lun_id;

		error = ENXIO;

		/*
		 * For a selection timeout, we consider all of the LUNs on
		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
		 * then we only get rid of the device(s) specified by the
		 * path in the original CCB.
		 */
		if (status == CAM_DEV_NOT_THERE)
			lun_id = xpt_path_lun_id(ccb->ccb_h.path);
		else
			lun_id = CAM_LUN_WILDCARD;

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, periph,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    lun_id) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				printf("Bus Device Reset sent\n");
			else
				printf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request requeued\n");
			printed++;
		}
		if ((periph->flags & CAM_PERIPH_INVALID) == 0)
			error = ERESTART;
		else {
			action_string = "Retries exhausted";
			error = EIO;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0 &&
		    (periph->flags & CAM_PERIPH_INVALID) == 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries exhausted";
		}
		break;
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else
			xpt_print(ccb->ccb_h.path, "Retrying command\n");
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}