2 * Common functions for CAM "type" (peripheral) drivers.
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
40 #include <sys/mutex.h>
43 #include <sys/devicestat.h>
47 #include <vm/vm_extern.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_queue.h>
52 #include <cam/cam_xpt_periph.h>
53 #include <cam/cam_periph.h>
54 #include <cam/cam_debug.h>
55 #include <cam/cam_sim.h>
57 #include <cam/scsi/scsi_all.h>
58 #include <cam/scsi/scsi_message.h>
59 #include <cam/scsi/scsi_pass.h>
61 static u_int camperiphnextunit(struct periph_driver *p_drv,
62 u_int newunit, int wired,
63 path_id_t pathid, target_id_t target,
65 static u_int camperiphunit(struct periph_driver *p_drv,
66 path_id_t pathid, target_id_t target,
68 static void camperiphdone(struct cam_periph *periph,
70 static void camperiphfree(struct cam_periph *periph);
71 static int camperiphscsistatuserror(union ccb *ccb,
74 u_int32_t sense_flags,
76 u_int32_t *relsim_flags,
79 const char **action_string);
80 static int camperiphscsisenseerror(union ccb *ccb,
83 u_int32_t sense_flags,
85 u_int32_t *relsim_flags,
88 const char **action_string);
90 static int nperiph_drivers;
91 static int initialized = 0;
92 struct periph_driver **periph_drivers;
94 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
96 static int periph_selto_delay = 1000;
97 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
98 static int periph_noresrc_delay = 500;
99 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
100 static int periph_busy_delay = 500;
101 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
105 periphdriver_register(void *data)
107 struct periph_driver *drv = (struct periph_driver *)data;
108 struct periph_driver **newdrivers, **old;
112 ndrivers = nperiph_drivers + 2;
113 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
116 if (ndrivers != nperiph_drivers + 2) {
118 * Lost the race against another registration; go around.
121 free(newdrivers, M_CAMPERIPH);
125 bcopy(periph_drivers, newdrivers,
126 sizeof(*newdrivers) * nperiph_drivers);
127 newdrivers[nperiph_drivers] = drv;
128 newdrivers[nperiph_drivers + 1] = NULL;
129 old = periph_drivers;
130 periph_drivers = newdrivers;
134 free(old, M_CAMPERIPH);
135 /* If driver marked as early or it is late now, initialize it. */
136 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
142 periphdriver_init(int level)
146 initialized = max(initialized, level);
147 for (i = 0; periph_drivers[i] != NULL; i++) {
148 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
149 if (early == initialized)
150 (*periph_drivers[i]->init)();
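/*
 * Hedged usage sketch (an assumption, not code from this file): drivers
 * normally reach the registration path above through the
 * PERIPHDRIVER_DECLARE() macro rather than by calling
 * periphdriver_register() directly.  "foo"/"fooinit" are hypothetical:
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */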
155 cam_periph_alloc(periph_ctor_t *periph_ctor,
156 periph_oninv_t *periph_oninvalidate,
157 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
158 char *name, cam_periph_type type, struct cam_path *path,
159 ac_callback_t *ac_callback, ac_code code, void *arg)
161 struct periph_driver **p_drv;
163 struct cam_periph *periph;
164 struct cam_periph *cur_periph;
166 target_id_t target_id;
173 * Handle Hot-Plug scenarios. If there is already a peripheral
174 * of our type assigned to this path, we are likely waiting for
175 * final close on an old, invalidated peripheral. If this is
176 * the case, queue up a deferred call to the peripheral's async
177 * handler. If it looks like a mistaken re-allocation, complain.
179 if ((periph = cam_periph_find(path, name)) != NULL) {
181 if ((periph->flags & CAM_PERIPH_INVALID) != 0
182 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
183 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
184 periph->deferred_callback = ac_callback;
185 periph->deferred_ac = code;
186 return (CAM_REQ_INPROG);
188 printf("cam_periph_alloc: attempt to re-allocate "
189 "valid device %s%d rejected flags %#x "
190 "refcount %d\n", periph->periph_name,
191 periph->unit_number, periph->flags,
194 return (CAM_REQ_INVALID);
197 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
201 return (CAM_RESRC_UNAVAIL);
206 sim = xpt_path_sim(path);
207 path_id = xpt_path_path_id(path);
208 target_id = xpt_path_target_id(path);
209 lun_id = xpt_path_lun_id(path);
210 periph->periph_start = periph_start;
211 periph->periph_dtor = periph_dtor;
212 periph->periph_oninval = periph_oninvalidate;
214 periph->periph_name = name;
215 periph->scheduled_priority = CAM_PRIORITY_NONE;
216 periph->immediate_priority = CAM_PRIORITY_NONE;
217 periph->refcount = 1; /* Dropped by invalidation. */
219 SLIST_INIT(&periph->ccb_list);
220 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
221 if (status != CAM_REQ_CMP)
226 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
227 if (strcmp((*p_drv)->driver_name, name) == 0)
230 if (*p_drv == NULL) {
231 printf("cam_periph_alloc: invalid periph name '%s'\n", name);
233 xpt_free_path(periph->path);
234 free(periph, M_CAMPERIPH);
235 return (CAM_REQ_INVALID);
237 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
238 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
239 while (cur_periph != NULL
240 && cur_periph->unit_number < periph->unit_number)
241 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
242 if (cur_periph != NULL) {
243 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
244 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
246 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
247 (*p_drv)->generation++;
253 status = xpt_add_periph(periph);
254 if (status != CAM_REQ_CMP)
258 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
260 status = periph_ctor(periph, arg);
262 if (status == CAM_REQ_CMP)
266 switch (init_level) {
268 /* Initialized successfully */
271 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
272 xpt_remove_periph(periph);
276 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
278 xpt_free_path(periph->path);
281 free(periph, M_CAMPERIPH);
284 /* No cleanup to perform. */
287 panic("%s: Unknown init level", __func__);
293 * Find a peripheral structure with the specified path, target, lun,
294 * and (optionally) name. If the name is NULL, this function will return
295 * the first peripheral driver that matches the specified path.
298 cam_periph_find(struct cam_path *path, char *name)
300 struct periph_driver **p_drv;
301 struct cam_periph *periph;
304 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
306 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
309 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
310 if (xpt_path_comp(periph->path, path) == 0) {
312 cam_periph_assert(periph, MA_OWNED);
326 * Find peripheral driver instances attached to the specified path.
329 cam_periph_list(struct cam_path *path, struct sbuf *sb)
331 struct sbuf local_sb;
332 struct periph_driver **p_drv;
333 struct cam_periph *periph;
339 sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
342 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
344 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
345 if (xpt_path_comp(periph->path, path) != 0)
348 if (sbuf_len(&local_sb) != 0)
349 sbuf_cat(&local_sb, ",");
351 sbuf_printf(&local_sb, "%s%d", periph->periph_name,
352 periph->unit_number);
354 if (sbuf_error(&local_sb) == ENOMEM) {
357 sbuf_delete(&local_sb);
364 sbuf_finish(&local_sb);
365 sbuf_cpy(sb, sbuf_data(&local_sb));
366 sbuf_delete(&local_sb);
371 cam_periph_acquire(struct cam_periph *periph)
375 status = CAM_REQ_CMP_ERR;
380 if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
382 status = CAM_REQ_CMP;
390 cam_periph_doacquire(struct cam_periph *periph)
394 KASSERT(periph->refcount >= 1,
395 ("cam_periph_doacquire() with refcount == %d", periph->refcount));
401 cam_periph_release_locked_buses(struct cam_periph *periph)
404 cam_periph_assert(periph, MA_OWNED);
405 KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
406 if (--periph->refcount == 0)
407 camperiphfree(periph);
411 cam_periph_release_locked(struct cam_periph *periph)
418 cam_periph_release_locked_buses(periph);
423 cam_periph_release(struct cam_periph *periph)
430 cam_periph_assert(periph, MA_NOTOWNED);
431 mtx = cam_periph_mtx(periph);
433 cam_periph_release_locked(periph);
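/*
 * Hedged usage sketch (an assumption about typical callers, not code from
 * this file): code that hands a periph pointer to another context takes a
 * reference first and drops it when finished, e.g.
 *
 *	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *		return (ENXIO);
 *	... use periph from a context that may outlive this call ...
 *	cam_periph_release(periph);
 */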
438 cam_periph_hold(struct cam_periph *periph, int priority)
443 * Increment the reference count on the peripheral
444 * while we wait for our lock attempt to succeed
445 * to ensure the peripheral doesn't disappear out
446 * from under us while we sleep.
449 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
452 cam_periph_assert(periph, MA_OWNED);
453 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
454 periph->flags |= CAM_PERIPH_LOCK_WANTED;
455 if ((error = cam_periph_sleep(periph, periph, priority,
456 "caplck", 0)) != 0) {
457 cam_periph_release_locked(periph);
460 if (periph->flags & CAM_PERIPH_INVALID) {
461 cam_periph_release_locked(periph);
466 periph->flags |= CAM_PERIPH_LOCKED;
471 cam_periph_unhold(struct cam_periph *periph)
474 cam_periph_assert(periph, MA_OWNED);
476 periph->flags &= ~CAM_PERIPH_LOCKED;
477 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
478 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
482 cam_periph_release_locked(periph);
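/*
 * Hedged usage sketch (an assumption, not code from this file): open-style
 * routines typically pair cam_periph_hold()/cam_periph_unhold() under the
 * periph lock to serialize against other holders:
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 *	... issue commands that must not interleave with other holders ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */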
486 * Look for the next unit number that is not currently in use for this
487 * peripheral type starting at "newunit". Also exclude unit numbers that
488 * are reserved for future "hardwiring" unless we already know that this
489 * is a potential wired device. Only assume that the device is "wired" the
490 * first time through the loop since after that we'll be looking at unit
491 * numbers that did not match a wiring entry.
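/*
 * Illustrative configuration example (an assumption about typical setups,
 * not code from this file): the wiring entries consulted here come from the
 * kernel environment, e.g. /boot/device.hints lines such as
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * which reserve unit da4 for the device at that bus/target/lun.
 */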
494 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
495 path_id_t pathid, target_id_t target, lun_id_t lun)
497 struct cam_periph *periph;
499 int i, val, dunit, r;
500 const char *dname, *strval;
502 periph_name = p_drv->driver_name;
505 for (periph = TAILQ_FIRST(&p_drv->units);
506 periph != NULL && periph->unit_number != newunit;
507 periph = TAILQ_NEXT(periph, unit_links))
510 if (periph != NULL && periph->unit_number == newunit) {
512 xpt_print(periph->path, "Duplicate Wired "
514 xpt_print(periph->path, "Second device (%s "
515 "device at scbus%d target %d lun %d) will "
516 "not be wired\n", periph_name, pathid,
526 * Don't match entries like "da 4" as a wired down
527 * device, but do match entries like "da 4 target 5"
528 * or even "da 4 scbus 1".
533 r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
536 /* if no "target" and no specific scbus, skip */
537 if (resource_int_value(dname, dunit, "target", &val) &&
538 (resource_string_value(dname, dunit, "at",&strval)||
539 strcmp(strval, "scbus") == 0))
541 if (newunit == dunit)
551 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
552 target_id_t target, lun_id_t lun)
555 int wired, i, val, dunit;
556 const char *dname, *strval;
557 char pathbuf[32], *periph_name;
559 periph_name = p_drv->driver_name;
560 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
564 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
566 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
567 if (strcmp(strval, pathbuf) != 0)
571 if (resource_int_value(dname, dunit, "target", &val) == 0) {
576 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
588 * Either start from 0 looking for the next unit or from
589 * the unit number given in the resource config. This way,
590 * if we have wildcard matches, we don't return the same
593 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
599 cam_periph_invalidate(struct cam_periph *periph)
602 cam_periph_assert(periph, MA_OWNED);
604 * We only call this routine the first time a peripheral is
607 if ((periph->flags & CAM_PERIPH_INVALID) != 0)
610 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
611 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
612 xpt_denounce_periph(periph);
613 periph->flags |= CAM_PERIPH_INVALID;
614 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
615 if (periph->periph_oninval != NULL)
616 periph->periph_oninval(periph);
617 cam_periph_release_locked(periph);
621 camperiphfree(struct cam_periph *periph)
623 struct periph_driver **p_drv;
625 cam_periph_assert(periph, MA_OWNED);
626 KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
627 periph->periph_name, periph->unit_number));
628 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
629 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
632 if (*p_drv == NULL) {
633 printf("camperiphfree: attempt to free non-existant periph\n");
638 * We need to set this flag before dropping the topology lock, to
639 * let anyone who is traversing the list know that this peripheral is
640 * about to be freed, and there will be no more reference count
643 periph->flags |= CAM_PERIPH_FREE;
646 * The peripheral destructor semantics dictate calling with only the
647 * SIM mutex held. Since it might sleep, it should not be called
648 * with the topology lock held.
653 * We need to call the peripheral destructor prior to removing the
654 * peripheral from the list. Otherwise, we risk running into a
655 * scenario where the peripheral unit number may get reused
656 * (because it has been removed from the list), but some resources
657 * used by the peripheral are still hanging around. In particular,
658 * the devfs nodes used by some peripherals like the pass(4) driver
659 * aren't fully cleaned up until the destructor is run. If the
660 * unit number is reused before the devfs instance is fully gone,
663 if (periph->periph_dtor != NULL)
664 periph->periph_dtor(periph);
667 * The peripheral list is protected by the topology lock.
671 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
672 (*p_drv)->generation++;
674 xpt_remove_periph(periph);
677 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
678 xpt_print(periph->path, "Periph destroyed\n");
680 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
682 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
686 switch (periph->deferred_ac) {
687 case AC_FOUND_DEVICE:
688 ccb.ccb_h.func_code = XPT_GDEV_TYPE;
689 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
693 case AC_PATH_REGISTERED:
694 ccb.ccb_h.func_code = XPT_PATH_INQ;
695 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
703 periph->deferred_callback(NULL, periph->deferred_ac,
706 xpt_free_path(periph->path);
707 free(periph, M_CAMPERIPH);
712 * Map user virtual pointers into kernel virtual address space, so we can
713 * access the memory. This is now a generic function that centralizes most
714 * of the sanity checks on the data flags, if any.
715 * This also only works for up to MAXPHYS memory. Since we use
716 * buffers to map stuff in and out, we're limited to the buffer size.
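/*
 * Hedged usage sketch (an assumption, not code from this file): a
 * peripheral driver servicing a user-supplied CCB typically brackets the
 * request like this (error handling trimmed; "maxio", "fooerror" and
 * "softc" are hypothetical names):
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	bzero(&mapinfo, sizeof(mapinfo));
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxio);
 *	if (error == 0) {
 *		cam_periph_lock(periph);
 *		error = cam_periph_runccb(ccb, fooerror, 0, SF_RETRY_UA,
 *		    softc->devstat);
 *		cam_periph_unlock(periph);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */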
719 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
723 int flags[CAM_PERIPH_MAXMAPS];
724 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
725 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
726 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
729 maxmap = DFLTPHYS; /* traditional default */
730 else if (maxmap > MAXPHYS)
731 maxmap = MAXPHYS; /* for safety */
732 switch(ccb->ccb_h.func_code) {
734 if (ccb->cdm.match_buf_len == 0) {
735 printf("cam_periph_mapmem: invalid match buffer "
739 if (ccb->cdm.pattern_buf_len > 0) {
740 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
741 lengths[0] = ccb->cdm.pattern_buf_len;
742 dirs[0] = CAM_DIR_OUT;
743 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
744 lengths[1] = ccb->cdm.match_buf_len;
745 dirs[1] = CAM_DIR_IN;
748 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
749 lengths[0] = ccb->cdm.match_buf_len;
750 dirs[0] = CAM_DIR_IN;
754 * This request will not go to the hardware, so there is no reason
755 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
760 case XPT_CONT_TARGET_IO:
761 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
763 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
765 data_ptrs[0] = &ccb->csio.data_ptr;
766 lengths[0] = ccb->csio.dxfer_len;
767 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
771 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
773 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
775 data_ptrs[0] = &ccb->ataio.data_ptr;
776 lengths[0] = ccb->ataio.dxfer_len;
777 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
781 data_ptrs[0] = &ccb->smpio.smp_request;
782 lengths[0] = ccb->smpio.smp_request_len;
783 dirs[0] = CAM_DIR_OUT;
784 data_ptrs[1] = &ccb->smpio.smp_response;
785 lengths[1] = ccb->smpio.smp_response_len;
786 dirs[1] = CAM_DIR_IN;
789 case XPT_DEV_ADVINFO:
790 if (ccb->cdai.bufsiz == 0)
793 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
794 lengths[0] = ccb->cdai.bufsiz;
795 dirs[0] = CAM_DIR_IN;
799 * This request will not go to the hardware, so there is no reason
800 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
806 break; /* NOTREACHED */
810 * Check the transfer length and permissions first, so we don't
811 * have to unmap any previously mapped buffers.
813 for (i = 0; i < numbufs; i++) {
818 * The userland data pointer passed in may not be page
819 * aligned. vmapbuf() truncates the address to a page
820 * boundary, so if the address isn't page aligned, we'll
821 * need enough space for the given transfer length, plus
822 * whatever extra space is necessary to make it to the page
826 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
827 printf("cam_periph_mapmem: attempt to map %lu bytes, "
828 "which is greater than %lu\n",
830 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
835 if (dirs[i] & CAM_DIR_OUT) {
836 flags[i] = BIO_WRITE;
839 if (dirs[i] & CAM_DIR_IN) {
846 * This keeps the kernel stack of the current thread from getting
847 * swapped. In low-memory situations where the kernel stack might
848 * otherwise get swapped out, this holds it and allows the thread
849 * to make progress and release the kernel mapped pages sooner.
851 * XXX KDM should I use P_NOSWAP instead?
855 for (i = 0; i < numbufs; i++) {
859 mapinfo->bp[i] = getpbuf(NULL);
861 /* save the buffer's data address */
862 mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;
864 /* put our pointer in the data slot */
865 mapinfo->bp[i]->b_data = *data_ptrs[i];
867 /* set the transfer length, we know it's < MAXPHYS */
868 mapinfo->bp[i]->b_bufsize = lengths[i];
870 /* set the direction */
871 mapinfo->bp[i]->b_iocmd = flags[i];
874 * Map the buffer into kernel memory.
876 * Note that useracc() alone is not a sufficient test.
877 * vmapbuf() can still fail due to a smaller file mapped
878 * into a larger area of VM, or if userland races against
879 * vmapbuf() after the useracc() check.
881 if (vmapbuf(mapinfo->bp[i], 1) < 0) {
882 for (j = 0; j < i; ++j) {
883 *data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
884 vunmapbuf(mapinfo->bp[j]);
885 relpbuf(mapinfo->bp[j], NULL);
887 relpbuf(mapinfo->bp[i], NULL);
892 /* set our pointer to the new mapped area */
893 *data_ptrs[i] = mapinfo->bp[i]->b_data;
895 mapinfo->num_bufs_used++;
899 * Now that we've gotten this far, change ownership to the kernel
900 * of the buffers so that we don't run afoul of returning to user
901 * space with locks (on the buffer) held.
903 for (i = 0; i < numbufs; i++) {
904 BUF_KERNPROC(mapinfo->bp[i]);
912 * Unmap memory segments mapped into kernel virtual address space by
913 * cam_periph_mapmem().
916 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
919 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
921 if (mapinfo->num_bufs_used <= 0) {
922 /* nothing to free and the process wasn't held. */
926 switch (ccb->ccb_h.func_code) {
928 numbufs = min(mapinfo->num_bufs_used, 2);
931 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
933 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
934 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
938 case XPT_CONT_TARGET_IO:
939 data_ptrs[0] = &ccb->csio.data_ptr;
940 numbufs = min(mapinfo->num_bufs_used, 1);
943 data_ptrs[0] = &ccb->ataio.data_ptr;
944 numbufs = min(mapinfo->num_bufs_used, 1);
947 numbufs = min(mapinfo->num_bufs_used, 2);
948 data_ptrs[0] = &ccb->smpio.smp_request;
949 data_ptrs[1] = &ccb->smpio.smp_response;
951 case XPT_DEV_ADVINFO:
952 numbufs = min(mapinfo->num_bufs_used, 1);
953 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
956 /* allow ourselves to be swapped once again */
959 break; /* NOTREACHED */
962 for (i = 0; i < numbufs; i++) {
963 /* Set the user's pointer back to the original value */
964 *data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;
966 /* unmap the buffer */
967 vunmapbuf(mapinfo->bp[i]);
969 /* release the buffer */
970 relpbuf(mapinfo->bp[i], NULL);
973 /* allow ourselves to be swapped once again */
978 cam_periph_ccbwait(union ccb *ccb)
981 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
982 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
983 xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, PRIBIO,
988 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
989 int (*error_routine)(union ccb *ccb,
991 u_int32_t sense_flags))
1000 case CAMGETPASSTHRU:
1001 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1002 xpt_setup_ccb(&ccb->ccb_h,
1004 CAM_PRIORITY_NORMAL);
1005 ccb->ccb_h.func_code = XPT_GDEVLIST;
1008 * Basically, the point of this is that we walk through
1009 * the list of devices until we find a passthrough
1010 * device. In the current version of the CAM code, the
1011 * only way to determine what type of device we're dealing
1012 * with is by its name.
1014 while (found == 0) {
1015 ccb->cgdl.index = 0;
1016 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
1017 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
1019 /* we want the next device in the list */
1021 if (strncmp(ccb->cgdl.periph_name,
1027 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
1029 ccb->cgdl.periph_name[0] = '\0';
1030 ccb->cgdl.unit_number = 0;
1035 /* copy the result back out */
1036 bcopy(ccb, addr, sizeof(union ccb));
1038 /* and release the ccb */
1039 xpt_release_ccb(ccb);
1050 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
1053 /* Caller will release the CCB */
1054 wakeup(&done_ccb->ccb_h.cbfcnp);
1058 cam_periph_runccb(union ccb *ccb,
1059 int (*error_routine)(union ccb *ccb,
1061 u_int32_t sense_flags),
1062 cam_flags camflags, u_int32_t sense_flags,
1065 struct bintime *starttime;
1066 struct bintime ltime;
1070 xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
1073 * If the user has supplied a stats structure, and if we understand
1074 * this particular type of ccb, record the transaction start.
1076 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
1077 ccb->ccb_h.func_code == XPT_ATA_IO)) {
1079 binuptime(starttime);
1080 devstat_start_transaction(ds, starttime);
1083 ccb->ccb_h.cbfcnp = cam_periph_done;
1087 cam_periph_ccbwait(ccb);
1088 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1090 else if (error_routine != NULL)
1091 error = (*error_routine)(ccb, camflags, sense_flags);
1095 } while (error == ERESTART);
1097 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1098 cam_release_devq(ccb->ccb_h.path,
1099 /* relsim_flags */0,
1102 /* getcount_only */ FALSE);
1103 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1107 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1108 devstat_end_transaction(ds,
1109 ccb->csio.dxfer_len - ccb->csio.resid,
1110 ccb->csio.tag_action & 0x3,
1111 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1112 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1113 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1115 DEVSTAT_READ, NULL, starttime);
1116 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1117 devstat_end_transaction(ds,
1118 ccb->ataio.dxfer_len - ccb->ataio.resid,
1119 ccb->ataio.tag_action & 0x3,
1120 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1121 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1122 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1124 DEVSTAT_READ, NULL, starttime);
1132 cam_freeze_devq(struct cam_path *path)
1134 struct ccb_hdr ccb_h;
1136 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
1137 xpt_setup_ccb(&ccb_h, path, /*priority*/1);
1138 ccb_h.func_code = XPT_NOOP;
1139 ccb_h.flags = CAM_DEV_QFREEZE;
1140 xpt_action((union ccb *)&ccb_h);
1144 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
1145 u_int32_t openings, u_int32_t arg,
1148 struct ccb_relsim crs;
1150 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
1151 relsim_flags, openings, arg, getcount_only));
1152 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1153 crs.ccb_h.func_code = XPT_REL_SIMQ;
1154 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1155 crs.release_flags = relsim_flags;
1156 crs.openings = openings;
1157 crs.release_timeout = arg;
1158 xpt_action((union ccb *)&crs);
1159 return (crs.qfrozen_cnt);
1162 #define saved_ccb_ptr ppriv_ptr0
1164 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1166 union ccb *saved_ccb;
1168 struct scsi_start_stop_unit *scsi_cmd;
1169 int error_code, sense_key, asc, ascq;
1171 scsi_cmd = (struct scsi_start_stop_unit *)
1172 &done_ccb->csio.cdb_io.cdb_bytes;
1173 status = done_ccb->ccb_h.status;
1175 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1176 if (scsi_extract_sense_ccb(done_ccb,
1177 &error_code, &sense_key, &asc, &ascq)) {
1179 * If the error is "invalid field in CDB",
1180 * and the load/eject flag is set, turn the
1181 * flag off and try again. This is just in
1182 * case the drive in question barfs on the
1183 * load/eject flag. The CAM code should set
1184 * the load/eject flag by default for
1187 if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1188 ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1189 (asc == 0x24) && (ascq == 0x00)) {
1190 scsi_cmd->how &= ~SSS_LOEJ;
1191 if (status & CAM_DEV_QFRZN) {
1192 cam_release_devq(done_ccb->ccb_h.path,
1194 done_ccb->ccb_h.status &=
1197 xpt_action(done_ccb);
1201 if (cam_periph_error(done_ccb,
1202 0, SF_RETRY_UA | SF_NO_PRINT, NULL) == ERESTART)
1204 if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
1205 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1206 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1210 * If we have successfully taken a device from the not
1211 * ready to ready state, re-scan the device and re-get
1212 * the inquiry information. Many devices (mostly disks)
1213 * don't properly report their inquiry information unless
1216 if (scsi_cmd->opcode == START_STOP_UNIT)
1217 xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
1221 * Perform the final retry with the original CCB so that final
1222 * error processing is performed by the owner of the CCB.
1224 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1225 bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
1226 xpt_free_ccb(saved_ccb);
1227 if (done_ccb->ccb_h.cbfcnp != camperiphdone)
1228 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1229 xpt_action(done_ccb);
1232 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1233 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1237 * Generic Async Event handler. Peripheral drivers usually
1238 * filter out the events that require personal attention,
1239 * and leave the rest to this function.
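/*
 * Hedged usage sketch (an assumption, not code from this file): a typical
 * peripheral async callback handles the events it cares about and hands
 * everything else to cam_periph_async().  "foo" is a hypothetical driver:
 *
 *	static void
 *	fooasync(void *callback_arg, u_int32_t code,
 *	    struct cam_path *path, void *arg)
 *	{
 *		struct cam_periph *periph = (struct cam_periph *)callback_arg;
 *
 *		switch (code) {
 *		case AC_FOUND_DEVICE:
 *			... driver-specific attach handling ...
 *			break;
 *		default:
 *			cam_periph_async(periph, code, path, arg);
 *			break;
 *		}
 *	}
 */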
1242 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1243 struct cam_path *path, void *arg)
1246 case AC_LOST_DEVICE:
1247 cam_periph_invalidate(periph);
1255 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1257 struct ccb_getdevstats cgds;
1259 xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1260 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1261 xpt_action((union ccb *)&cgds);
1262 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
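/*
 * Worked example for cam_periph_freeze_after_event() below (illustrative
 * numbers, not from a trace): with a 2000ms settle time and a reset
 * recorded 1500ms ago, the elapsed delta is shorter than the requested
 * duration, so the device queue is frozen and released again after the
 * remaining ~500ms.  If the event is older than the settle time, nothing
 * is done.
 */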
1266 cam_periph_freeze_after_event(struct cam_periph *periph,
1267 struct timeval* event_time, u_int duration_ms)
1269 struct timeval delta;
1270 struct timeval duration_tv;
1272 if (!timevalisset(event_time))
1276 timevalsub(&delta, event_time);
1277 duration_tv.tv_sec = duration_ms / 1000;
1278 duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1279 if (timevalcmp(&delta, &duration_tv, <)) {
1280 timevalsub(&duration_tv, &delta);
1282 duration_ms = duration_tv.tv_sec * 1000;
1283 duration_ms += duration_tv.tv_usec / 1000;
1284 cam_freeze_devq(periph->path);
1285 cam_release_devq(periph->path,
1286 RELSIM_RELEASE_AFTER_TIMEOUT,
1288 /*timeout*/duration_ms,
1289 /*getcount_only*/0);
1295 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
1296 cam_flags camflags, u_int32_t sense_flags,
1297 int *openings, u_int32_t *relsim_flags,
1298 u_int32_t *timeout, u_int32_t *action, const char **action_string)
1302 switch (ccb->csio.scsi_status) {
1303 case SCSI_STATUS_OK:
1304 case SCSI_STATUS_COND_MET:
1305 case SCSI_STATUS_INTERMED:
1306 case SCSI_STATUS_INTERMED_COND_MET:
1309 case SCSI_STATUS_CMD_TERMINATED:
1310 case SCSI_STATUS_CHECK_COND:
1311 error = camperiphscsisenseerror(ccb, orig_ccb,
1320 case SCSI_STATUS_QUEUE_FULL:
1323 struct ccb_getdevstats cgds;
1326 * First off, find out what the current
1327 * transaction counts are.
1329 xpt_setup_ccb(&cgds.ccb_h,
1331 CAM_PRIORITY_NORMAL);
1332 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1333 xpt_action((union ccb *)&cgds);
1336 * If we were the only transaction active, treat
1337 * the QUEUE FULL as if it were a BUSY condition.
1339 if (cgds.dev_active != 0) {
1343 * Reduce the number of openings to
1344 * be 1 less than the amount it took
1345 * to get a queue full bounded by the
1346 * minimum allowed tag count for this
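/*
 * Illustrative numbers (an assumption, not from a trace): if 30
 * commands were active and 2 openings remained when QUEUE FULL was
 * returned, total_openings is 32 and *openings drops to 30 (but
 * never below cgds.mintags), so RELSIM_ADJUST_OPENINGS is requested.
 */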
1349 total_openings = cgds.dev_active + cgds.dev_openings;
1350 *openings = cgds.dev_active;
1351 if (*openings < cgds.mintags)
1352 *openings = cgds.mintags;
1353 if (*openings < total_openings)
1354 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1357 * Some devices report queue full for
1358 * temporary resource shortages. For
1359 * this reason, we allow a minimum
1360 * tag count to be entered via a
1361 * quirk entry to prevent the queue
1362 * count on these devices from falling
1363 * to a pessimistically low value. We
1364 * still wait for the next successful
1365 * completion, however, before queueing
1366 * more transactions to the device.
1368 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1372 *action &= ~SSQ_PRINT_SENSE;
1377 case SCSI_STATUS_BUSY:
1379 * Restart the queue after either another
1380 * command completes or a 1 second timeout.
1382 if ((sense_flags & SF_RETRY_BUSY) != 0 ||
1383 (ccb->ccb_h.retry_count--) > 0) {
1385 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1386 | RELSIM_RELEASE_AFTER_CMDCMPLT;
1392 case SCSI_STATUS_RESERV_CONFLICT:
1401 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
1402 cam_flags camflags, u_int32_t sense_flags,
1403 int *openings, u_int32_t *relsim_flags,
1404 u_int32_t *timeout, u_int32_t *action, const char **action_string)
1406 struct cam_periph *periph;
1407 union ccb *orig_ccb = ccb;
1408 int error, recoveryccb;
1410 periph = xpt_path_periph(ccb->ccb_h.path);
1411 recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
1412 if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
1414 * If error recovery is already in progress, don't attempt
1415 * to process this error, but requeue it unconditionally
1416 * and attempt to process it once error recovery has
1417 * completed. This failed command is probably related to
1418 * the error that caused the currently active error recovery
1419 * action so our current recovery efforts should also
1420 * address this command. Be aware that the error recovery
1421 * code assumes that only one recovery action is in progress
1422 * on a particular peripheral instance at any given time
1423 * (e.g. only one saved CCB for error recovery) so it is
1424 * imperative that we don't violate this assumption.
1427 *action &= ~SSQ_PRINT_SENSE;
1429 scsi_sense_action err_action;
1430 struct ccb_getdev cgd;
1433 * Grab the inquiry data for this device.
1435 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1436 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1437 xpt_action((union ccb *)&cgd);
1439 err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
1441 error = err_action & SS_ERRMASK;
1444 * Do not autostart sequential access devices
1445 * to avoid unexpected tape loading.
1447 if ((err_action & SS_MASK) == SS_START &&
1448 SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1449 *action_string = "Will not autostart a "
1450 "sequential access device";
1451 goto sense_error_done;
1455 * Avoid recovery recursion if recovery action is the same.
1457 if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
1458 if (((err_action & SS_MASK) == SS_START &&
1459 ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
1460 ((err_action & SS_MASK) == SS_TUR &&
1461 (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
1462 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1463 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1469 * If the recovery action will consume a retry,
1470 * make sure we actually have retries available.
1472 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1473 if (ccb->ccb_h.retry_count > 0 &&
1474 (periph->flags & CAM_PERIPH_INVALID) == 0)
1475 ccb->ccb_h.retry_count--;
1477 *action_string = "Retries exhausted";
1478 goto sense_error_done;
1482 if ((err_action & SS_MASK) >= SS_START) {
1484 * Do common portions of commands that
1485 * use recovery CCBs.
1487 orig_ccb = xpt_alloc_ccb_nowait();
1488 if (orig_ccb == NULL) {
1489 *action_string = "Can't allocate recovery CCB";
1490 goto sense_error_done;
1493 * Clear freeze flag for original request here, as
1494 * this freeze will be dropped as part of ERESTART.
1496 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1497 bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
1500 switch (err_action & SS_MASK) {
1502 *action_string = "No recovery action needed";
1506 *action_string = "Retrying command (per sense data)";
1510 *action_string = "Unretryable error";
1517 * Send a start unit command to the device, and
1518 * then retry the command.
1520 *action_string = "Attempting to start unit";
1521 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1524 * Check for removable media and set
1525 * load/eject flag appropriately.
1527 if (SID_IS_REMOVABLE(&cgd.inq_data))
1532 scsi_start_stop(&ccb->csio,
1546 * Send a Test Unit Ready to the device.
1547 * If the 'many' flag is set, we send 120
1548 * test unit ready commands, one every half
1549 * second. Otherwise, we just send one TUR.
1550 * We only want to do this if the retry
1551 * count has not been exhausted.
1555 if ((err_action & SSQ_MANY) != 0) {
1556 *action_string = "Polling device for readiness";
1559 *action_string = "Testing device for readiness";
1562 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1563 scsi_test_unit_ready(&ccb->csio,
1571 * Accomplish our 500ms delay by deferring
1572 * the release of our device queue appropriately.
1574 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1579 panic("Unhandled error action %x", err_action);
1582 if ((err_action & SS_MASK) >= SS_START) {
1584 * Drop the priority, so that the recovery
1585 * CCB is the first to execute. Freeze the queue
1586 * after this command is sent so that we can
1587 * restore the old csio and have it queued in
1588 * the proper order before we release normal
1589 * transactions to the device.
1591 ccb->ccb_h.pinfo.priority--;
1592 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1593 ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1599 *action = err_action;
1605 * Generic error handler. Peripheral drivers usually filter
1606 * out the errors that they handle in a unique manner, then
1607 * call this function.
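/*
 * Hedged usage sketch (an assumption, not code from this file; it mirrors
 * the camperiphdone() recovery path above): a driver's completion routine
 * typically defers to this handler and honors an ERESTART result:
 *
 *	error = cam_periph_error(done_ccb, 0, SF_RETRY_UA | SF_NO_PRINT, NULL);
 *	if (error == ERESTART)
 *		return;
 *	if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
 *		cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
 *		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
 *	}
 */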
1610 cam_periph_error(union ccb *ccb, cam_flags camflags,
1611 u_int32_t sense_flags, union ccb *save_ccb)
1613 struct cam_path *newpath;
1614 union ccb *orig_ccb, *scan_ccb;
1615 struct cam_periph *periph;
1616 const char *action_string;
1618 int frozen, error, openings;
1619 u_int32_t action, relsim_flags, timeout;
1621 action = SSQ_PRINT_SENSE;
1622 periph = xpt_path_periph(ccb->ccb_h.path);
1623 action_string = NULL;
1624 status = ccb->ccb_h.status;
1625 frozen = (status & CAM_DEV_QFRZN) != 0;
1626 status &= CAM_STATUS_MASK;
1627 openings = relsim_flags = timeout = 0;
1633 action &= ~SSQ_PRINT_SENSE;
1635 case CAM_SCSI_STATUS_ERROR:
1636 error = camperiphscsistatuserror(ccb, &orig_ccb,
1637 camflags, sense_flags, &openings, &relsim_flags,
1638 &timeout, &action, &action_string);
1640 case CAM_AUTOSENSE_FAIL:
1641 error = EIO; /* we have to kill the command */
1645 case CAM_MSG_REJECT_REC:
1646 /* XXX Don't know that these are correct */
1649 case CAM_SEL_TIMEOUT:
1650 if ((camflags & CAM_RETRY_SELTO) != 0) {
1651 if (ccb->ccb_h.retry_count > 0 &&
1652 (periph->flags & CAM_PERIPH_INVALID) == 0) {
1653 ccb->ccb_h.retry_count--;
1657 * Wait a bit to give the device
1658 * time to recover before we try again.
1660 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1661 timeout = periph_selto_delay;
1664 action_string = "Retries exhausted";
1667 case CAM_DEV_NOT_THERE:
1671 case CAM_REQ_INVALID:
1672 case CAM_PATH_INVALID:
1674 case CAM_PROVIDE_FAIL:
1675 case CAM_REQ_TOO_BIG:
1676 case CAM_LUN_INVALID:
1677 case CAM_TID_INVALID:
1678 case CAM_FUNC_NOTAVAIL:
1681 case CAM_SCSI_BUS_RESET:
1684 * Commands that repeatedly time out and cause these
1685 * kinds of error recovery actions should return
1686 * CAM_CMD_TIMEOUT, which allows us to safely assume
1687 * that this command was an innocent bystander to
1688 * these events and should be unconditionally
1691 case CAM_REQUEUE_REQ:
1692 /* Unconditional requeue if device is still there */
1693 if (periph->flags & CAM_PERIPH_INVALID) {
1694 action_string = "Periph was invalidated";
1696 } else if (sense_flags & SF_NO_RETRY) {
1698 action_string = "Retry was blocked";
1701 action &= ~SSQ_PRINT_SENSE;
1704 case CAM_RESRC_UNAVAIL:
1705 /* Wait a bit for the resource shortage to abate. */
1706 timeout = periph_noresrc_delay;
1710 /* Wait a bit for the busy condition to abate. */
1711 timeout = periph_busy_delay;
1713 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1715 case CAM_ATA_STATUS_ERROR:
1716 case CAM_REQ_CMP_ERR:
1717 case CAM_CMD_TIMEOUT:
1718 case CAM_UNEXP_BUSFREE:
1719 case CAM_UNCOR_PARITY:
1720 case CAM_DATA_RUN_ERR:
1722 if (periph->flags & CAM_PERIPH_INVALID) {
1724 action_string = "Periph was invalidated";
1725 } else if (ccb->ccb_h.retry_count == 0) {
1727 action_string = "Retries exhausted";
1728 } else if (sense_flags & SF_NO_RETRY) {
1730 action_string = "Retry was blocked";
1732 ccb->ccb_h.retry_count--;
1738 if ((sense_flags & SF_PRINT_ALWAYS) ||
1739 CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
1740 action |= SSQ_PRINT_SENSE;
1741 else if (sense_flags & SF_NO_PRINT)
1742 action &= ~SSQ_PRINT_SENSE;
1743 if ((action & SSQ_PRINT_SENSE) != 0)
1744 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1745 if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
1746 if (error != ERESTART) {
1747 if (action_string == NULL)
1748 action_string = "Unretryable error";
1749 xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
1750 error, action_string);
1751 } else if (action_string != NULL)
1752 xpt_print(ccb->ccb_h.path, "%s\n", action_string);
1754 xpt_print(ccb->ccb_h.path, "Retrying command\n");
1757 if ((action & SSQ_LOST) != 0) {
1761 * For a selection timeout, we consider all of the LUNs on
1762 * the target to be gone. If the status is CAM_DEV_NOT_THERE,
1763 * then we only get rid of the device(s) specified by the
1764 * path in the original CCB.
1766 if (status == CAM_SEL_TIMEOUT)
1767 lun_id = CAM_LUN_WILDCARD;
1769 lun_id = xpt_path_lun_id(ccb->ccb_h.path);
1771 /* Should we do more if we can't create the path?? */
1772 if (xpt_create_path(&newpath, periph,
1773 xpt_path_path_id(ccb->ccb_h.path),
1774 xpt_path_target_id(ccb->ccb_h.path),
1775 lun_id) == CAM_REQ_CMP) {
1778 * Let peripheral drivers know that this
1779 * device has gone away.
1781 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1782 xpt_free_path(newpath);
1786 /* Broadcast UNIT ATTENTIONs to all periphs. */
1787 if ((action & SSQ_UA) != 0)
1788 xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
1790 /* Rescan target on "Reported LUNs data has changed" */
1791 if ((action & SSQ_RESCAN) != 0) {
1792 if (xpt_create_path(&newpath, NULL,
1793 xpt_path_path_id(ccb->ccb_h.path),
1794 xpt_path_target_id(ccb->ccb_h.path),
1795 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
1797 scan_ccb = xpt_alloc_ccb_nowait();
1798 if (scan_ccb != NULL) {
1799 scan_ccb->ccb_h.path = newpath;
1800 scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
1801 scan_ccb->crcn.flags = 0;
1802 xpt_rescan(scan_ccb);
1805 "Can't allocate CCB to rescan target\n");
1806 xpt_free_path(newpath);
1811 /* Attempt a retry */
1812 if (error == ERESTART || error == 0) {
1814 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1815 if (error == ERESTART)
1818 cam_release_devq(ccb->ccb_h.path,
1822 /*getcount_only*/0);