2 * Common functions for CAM "type" (peripheral) drivers.
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
40 #include <sys/mutex.h>
43 #include <sys/devicestat.h>
47 #include <vm/vm_extern.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_queue.h>
52 #include <cam/cam_xpt_periph.h>
53 #include <cam/cam_periph.h>
54 #include <cam/cam_debug.h>
55 #include <cam/cam_sim.h>
57 #include <cam/scsi/scsi_all.h>
58 #include <cam/scsi/scsi_message.h>
59 #include <cam/scsi/scsi_pass.h>
61 static u_int camperiphnextunit(struct periph_driver *p_drv,
62 u_int newunit, int wired,
63 path_id_t pathid, target_id_t target,
65 static u_int camperiphunit(struct periph_driver *p_drv,
66 path_id_t pathid, target_id_t target,
68 static void camperiphdone(struct cam_periph *periph,
70 static void camperiphfree(struct cam_periph *periph);
71 static int camperiphscsistatuserror(union ccb *ccb,
74 u_int32_t sense_flags,
76 u_int32_t *relsim_flags,
79 const char **action_string);
80 static int camperiphscsisenseerror(union ccb *ccb,
83 u_int32_t sense_flags,
85 u_int32_t *relsim_flags,
88 const char **action_string);
90 static int nperiph_drivers;
91 static int initialized = 0;
92 struct periph_driver **periph_drivers;
94 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
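/*
 * Retry-delay tunables used by cam_periph_error() below.  The values are
 * handed to cam_release_devq() as RELSIM_RELEASE_AFTER_TIMEOUT timeouts,
 * so they are assumed to be in milliseconds.
 */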
96 static int periph_selto_delay = 1000;
97 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
98 static int periph_noresrc_delay = 500;
99 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
100 static int periph_busy_delay = 500;
101 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
105 periphdriver_register(void *data)
107 struct periph_driver *drv = (struct periph_driver *)data;
108 struct periph_driver **newdrivers, **old;
111 ndrivers = nperiph_drivers + 2;
112 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
115 bcopy(periph_drivers, newdrivers,
116 sizeof(*newdrivers) * nperiph_drivers);
117 newdrivers[nperiph_drivers] = drv;
118 newdrivers[nperiph_drivers + 1] = NULL;
119 old = periph_drivers;
120 periph_drivers = newdrivers;
122 free(old, M_CAMPERIPH);
124 /* If driver marked as early or it is late now, initialize it. */
125 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
131 periphdriver_init(int level)
135 initialized = max(initialized, level);
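/*
 * Run the init routine of every registered driver whose pass matches the
 * current level: CAM_PERIPH_DRV_EARLY drivers at level 1, all remaining
 * drivers at level 2.
 */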
136 for (i = 0; periph_drivers[i] != NULL; i++) {
137 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
138 if (early == initialized)
139 (*periph_drivers[i]->init)();
144 cam_periph_alloc(periph_ctor_t *periph_ctor,
145 periph_oninv_t *periph_oninvalidate,
146 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
147 char *name, cam_periph_type type, struct cam_path *path,
148 ac_callback_t *ac_callback, ac_code code, void *arg)
150 struct periph_driver **p_drv;
152 struct cam_periph *periph;
153 struct cam_periph *cur_periph;
155 target_id_t target_id;
162 * Handle Hot-Plug scenarios. If there is already a peripheral
163 * of our type assigned to this path, we are likely waiting for
164 * final close on an old, invalidated peripheral. If this is
165 * the case, queue up a deferred call to the peripheral's async
166 * handler. If it looks like a mistaken re-allocation, complain.
168 if ((periph = cam_periph_find(path, name)) != NULL) {
170 if ((periph->flags & CAM_PERIPH_INVALID) != 0
171 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
172 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
173 periph->deferred_callback = ac_callback;
174 periph->deferred_ac = code;
175 return (CAM_REQ_INPROG);
177 printf("cam_periph_alloc: attempt to re-allocate "
178 "valid device %s%d rejected flags %#x "
179 "refcount %d\n", periph->periph_name,
180 periph->unit_number, periph->flags,
183 return (CAM_REQ_INVALID);
186 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
190 return (CAM_RESRC_UNAVAIL);
195 sim = xpt_path_sim(path);
196 path_id = xpt_path_path_id(path);
197 target_id = xpt_path_target_id(path);
198 lun_id = xpt_path_lun_id(path);
199 cam_init_pinfo(&periph->pinfo);
200 periph->periph_start = periph_start;
201 periph->periph_dtor = periph_dtor;
202 periph->periph_oninval = periph_oninvalidate;
204 periph->periph_name = name;
205 periph->immediate_priority = CAM_PRIORITY_NONE;
206 periph->refcount = 0;
208 SLIST_INIT(&periph->ccb_list);
209 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
210 if (status != CAM_REQ_CMP)
215 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
216 if (strcmp((*p_drv)->driver_name, name) == 0)
219 if (*p_drv == NULL) {
220 printf("cam_periph_alloc: invalid periph name '%s'\n", name);
221 xpt_free_path(periph->path);
222 free(periph, M_CAMPERIPH);
224 return (CAM_REQ_INVALID);
226 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
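/*
 * Keep the driver's unit list sorted by unit number: walk to the first
 * existing peripheral with a larger unit number and insert in front of it.
 */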
227 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
228 while (cur_periph != NULL
229 && cur_periph->unit_number < periph->unit_number)
230 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
231 if (cur_periph != NULL) {
232 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
233 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
235 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
236 (*p_drv)->generation++;
242 status = xpt_add_periph(periph);
243 if (status != CAM_REQ_CMP)
247 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
249 status = periph_ctor(periph, arg);
251 if (status == CAM_REQ_CMP)
255 switch (init_level) {
257 /* Initialized successfully */
260 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
261 xpt_remove_periph(periph);
265 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
267 xpt_free_path(periph->path);
270 free(periph, M_CAMPERIPH);
273 /* No cleanup to perform. */
276 panic("cam_periph_alloc: Unkown init level");
282 * Find a peripheral structure with the specified path, target, lun,
283 * and (optionally) type. If the name is NULL, this function will return
284 * the first peripheral driver that matches the specified path.
287 cam_periph_find(struct cam_path *path, char *name)
289 struct periph_driver **p_drv;
290 struct cam_periph *periph;
293 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
295 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
298 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
299 if (xpt_path_comp(periph->path, path) == 0) {
301 mtx_assert(periph->sim->mtx, MA_OWNED);
315 * Find peripheral driver instances attached to the specified path.
318 cam_periph_list(struct cam_path *path, struct sbuf *sb)
320 struct sbuf local_sb;
321 struct periph_driver **p_drv;
322 struct cam_periph *periph;
328 sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
331 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
333 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
334 if (xpt_path_comp(periph->path, path) != 0)
337 if (sbuf_len(&local_sb) != 0)
338 sbuf_cat(&local_sb, ",");
340 sbuf_printf(&local_sb, "%s%d", periph->periph_name,
341 periph->unit_number);
343 if (sbuf_error(&local_sb) == ENOMEM) {
346 sbuf_delete(&local_sb);
353 sbuf_finish(&local_sb);
354 sbuf_cpy(sb, sbuf_data(&local_sb));
355 sbuf_delete(&local_sb);
360 cam_periph_acquire(struct cam_periph *periph)
364 status = CAM_REQ_CMP_ERR;
369 if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
371 status = CAM_REQ_CMP;
379 cam_periph_release_locked_buses(struct cam_periph *periph)
381 if (periph->refcount != 0) {
384 panic("%s: release of %p when refcount is zero\n ", __func__,
387 if (periph->refcount == 0
388 && (periph->flags & CAM_PERIPH_INVALID)) {
389 camperiphfree(periph);
394 cam_periph_release_locked(struct cam_periph *periph)
401 cam_periph_release_locked_buses(periph);
406 cam_periph_release(struct cam_periph *periph)
414 mtx_assert(sim->mtx, MA_NOTOWNED);
416 cam_periph_release_locked(periph);
417 mtx_unlock(sim->mtx);
421 cam_periph_hold(struct cam_periph *periph, int priority)
426 * Increment the reference count on the peripheral
427 * while we wait for our lock attempt to succeed
428 * to ensure the peripheral doesn't disappear out
429 * from under us while we sleep.
432 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
435 mtx_assert(periph->sim->mtx, MA_OWNED);
436 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
437 periph->flags |= CAM_PERIPH_LOCK_WANTED;
438 if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
439 "caplck", 0)) != 0) {
440 cam_periph_release_locked(periph);
445 periph->flags |= CAM_PERIPH_LOCKED;
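/*
 * cam_periph_hold() and cam_periph_unhold() bracket operations (for
 * example, a driver's open and close paths) that must be serialized for a
 * given peripheral: CAM_PERIPH_LOCKED is the lock, CAM_PERIPH_LOCK_WANTED
 * records waiters, and the acquire/release pair keeps the periph allocated
 * while a holder sleeps.
 */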
450 cam_periph_unhold(struct cam_periph *periph)
453 mtx_assert(periph->sim->mtx, MA_OWNED);
455 periph->flags &= ~CAM_PERIPH_LOCKED;
456 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
457 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
461 cam_periph_release_locked(periph);
465 * Look for the next unit number that is not currently in use for this
466 * peripheral type starting at "newunit". Also exclude unit numbers that
467 * are reserved for future "hardwiring" unless we already know that this
468 * is a potential wired device. Only assume that the device is "wired" the
469 * first time through the loop since after that we'll be looking at unit
470 * numbers that did not match a wiring entry.
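/*
 * Wiring entries come from the kernel hints mechanism queried through
 * resource_find_dev()/resource_*_value() below.  An illustrative (assumed)
 * loader.conf form would be:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * which reserves unit da4 for the device at that bus/target/lun.
 */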
473 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
474 path_id_t pathid, target_id_t target, lun_id_t lun)
476 struct cam_periph *periph;
478 int i, val, dunit, r;
479 const char *dname, *strval;
481 periph_name = p_drv->driver_name;
484 for (periph = TAILQ_FIRST(&p_drv->units);
485 periph != NULL && periph->unit_number != newunit;
486 periph = TAILQ_NEXT(periph, unit_links))
489 if (periph != NULL && periph->unit_number == newunit) {
491 xpt_print(periph->path, "Duplicate Wired "
493 xpt_print(periph->path, "Second device (%s "
494 "device at scbus%d target %d lun %d) will "
495 "not be wired\n", periph_name, pathid,
505 * Don't match entries like "da 4" as a wired down
506 * device, but do match entries like "da 4 target 5"
507 * or even "da 4 scbus 1".
512 r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
515 /* if no "target" and no specific scbus, skip */
516 if (resource_int_value(dname, dunit, "target", &val) &&
517 (resource_string_value(dname, dunit, "at",&strval)||
518 strcmp(strval, "scbus") == 0))
520 if (newunit == dunit)
530 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
531 target_id_t target, lun_id_t lun)
534 int wired, i, val, dunit;
535 const char *dname, *strval;
536 char pathbuf[32], *periph_name;
538 periph_name = p_drv->driver_name;
539 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
543 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
545 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
546 if (strcmp(strval, pathbuf) != 0)
550 if (resource_int_value(dname, dunit, "target", &val) == 0) {
555 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
567 * Either start from 0 looking for the next unit or from
568 * the unit number given in the resource config. This way,
569 * if we have wildcard matches, we don't return the same
572 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
578 cam_periph_invalidate(struct cam_periph *periph)
581 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
583 * We only call this routine the first time a peripheral is
586 if (((periph->flags & CAM_PERIPH_INVALID) == 0)
587 && (periph->periph_oninval != NULL))
588 periph->periph_oninval(periph);
590 periph->flags |= CAM_PERIPH_INVALID;
591 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
594 if (periph->refcount == 0)
595 camperiphfree(periph);
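/*
 * If the peripheral still holds references, the actual camperiphfree() is
 * deferred until cam_periph_release_locked_buses() drops the last
 * reference on the invalidated periph (see above).
 */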
600 camperiphfree(struct cam_periph *periph)
602 struct periph_driver **p_drv;
604 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
605 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
608 if (*p_drv == NULL) {
609 printf("camperiphfree: attempt to free non-existant periph\n");
613 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
614 (*p_drv)->generation++;
617 if (periph->periph_dtor != NULL)
618 periph->periph_dtor(periph);
619 xpt_remove_periph(periph);
620 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
622 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
626 switch (periph->deferred_ac) {
627 case AC_FOUND_DEVICE:
628 ccb.ccb_h.func_code = XPT_GDEV_TYPE;
629 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
633 case AC_PATH_REGISTERED:
634 ccb.ccb_h.func_code = XPT_PATH_INQ;
635 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
643 periph->deferred_callback(NULL, periph->deferred_ac,
646 xpt_free_path(periph->path);
647 free(periph, M_CAMPERIPH);
652 * Map user virtual pointers into kernel virtual address space, so we can
653 * access the memory. This won't work on physical pointers; for now it's
654 * up to the caller to check for that. (XXX KDM -- should we do that here
655 * instead?) This also only works for up to MAXPHYS memory. Since we use
656 * buffers to map stuff in and out, we're limited to the buffer size.
659 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
662 int flags[CAM_PERIPH_MAXMAPS];
663 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
664 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
665 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
666 /* Some controllers may not be able to handle more data. */
667 size_t maxmap = DFLTPHYS;
669 switch(ccb->ccb_h.func_code) {
671 if (ccb->cdm.match_buf_len == 0) {
672 printf("cam_periph_mapmem: invalid match buffer "
676 if (ccb->cdm.pattern_buf_len > 0) {
677 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
678 lengths[0] = ccb->cdm.pattern_buf_len;
679 dirs[0] = CAM_DIR_OUT;
680 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
681 lengths[1] = ccb->cdm.match_buf_len;
682 dirs[1] = CAM_DIR_IN;
685 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
686 lengths[0] = ccb->cdm.match_buf_len;
687 dirs[0] = CAM_DIR_IN;
691 * This request will not go to the hardware; no reason
692 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
697 case XPT_CONT_TARGET_IO:
698 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
701 data_ptrs[0] = &ccb->csio.data_ptr;
702 lengths[0] = ccb->csio.dxfer_len;
703 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
707 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
710 data_ptrs[0] = &ccb->ataio.data_ptr;
711 lengths[0] = ccb->ataio.dxfer_len;
712 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
716 data_ptrs[0] = &ccb->smpio.smp_request;
717 lengths[0] = ccb->smpio.smp_request_len;
718 dirs[0] = CAM_DIR_OUT;
719 data_ptrs[1] = &ccb->smpio.smp_response;
720 lengths[1] = ccb->smpio.smp_response_len;
721 dirs[1] = CAM_DIR_IN;
724 case XPT_DEV_ADVINFO:
725 if (ccb->cdai.bufsiz == 0)
728 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
729 lengths[0] = ccb->cdai.bufsiz;
730 dirs[0] = CAM_DIR_IN;
734 * This request will not go to the hardware; no reason
735 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
741 break; /* NOTREACHED */
745 * Check the transfer length and permissions first, so we don't
746 * have to unmap any previously mapped buffers.
748 for (i = 0; i < numbufs; i++) {
753 * The userland data pointer passed in may not be page
754 * aligned. vmapbuf() truncates the address to a page
755 * boundary, so if the address isn't page aligned, we'll
756 * need enough space for the given transfer length, plus
757 * whatever extra space is necessary to make it to the page
761 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
762 printf("cam_periph_mapmem: attempt to map %lu bytes, "
763 "which is greater than %lu\n",
765 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
770 if (dirs[i] & CAM_DIR_OUT) {
771 flags[i] = BIO_WRITE;
774 if (dirs[i] & CAM_DIR_IN) {
780 /* this keeps the current process from getting swapped */
782 * XXX KDM should I use P_NOSWAP instead?
786 for (i = 0; i < numbufs; i++) {
790 mapinfo->bp[i] = getpbuf(NULL);
792 /* save the buffer's data address */
793 mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;
795 /* put our pointer in the data slot */
796 mapinfo->bp[i]->b_data = *data_ptrs[i];
798 /* set the transfer length, we know it's < MAXPHYS */
799 mapinfo->bp[i]->b_bufsize = lengths[i];
801 /* set the direction */
802 mapinfo->bp[i]->b_iocmd = flags[i];
805 * Map the buffer into kernel memory.
807 * Note that useracc() alone is not a sufficient test.
808 * vmapbuf() can still fail due to a smaller file mapped
809 * into a larger area of VM, or if userland races against
810 * vmapbuf() after the useracc() check.
812 if (vmapbuf(mapinfo->bp[i]) < 0) {
813 for (j = 0; j < i; ++j) {
814 *data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
815 vunmapbuf(mapinfo->bp[j]);
816 relpbuf(mapinfo->bp[j], NULL);
818 relpbuf(mapinfo->bp[i], NULL);
823 /* set our pointer to the new mapped area */
824 *data_ptrs[i] = mapinfo->bp[i]->b_data;
826 mapinfo->num_bufs_used++;
830 * Now that we've gotten this far, change ownership of the buffers
831 * to the kernel so that we don't run afoul of returning to user
832 * space with locks (on the buffer) held.
834 for (i = 0; i < numbufs; i++) {
835 BUF_KERNPROC(mapinfo->bp[i]);
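/*
 * Note: a successful cam_periph_mapmem() must be paired with a matching
 * cam_periph_unmapmem() call (below) so that the user pointers are
 * restored and the pbufs are released.
 */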
843 * Unmap memory segments mapped into kernel virtual address space by
844 * cam_periph_mapmem().
847 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
850 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
852 if (mapinfo->num_bufs_used <= 0) {
853 /* allow ourselves to be swapped once again */
858 switch (ccb->ccb_h.func_code) {
860 numbufs = min(mapinfo->num_bufs_used, 2);
863 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
865 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
866 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
870 case XPT_CONT_TARGET_IO:
871 data_ptrs[0] = &ccb->csio.data_ptr;
872 numbufs = min(mapinfo->num_bufs_used, 1);
875 data_ptrs[0] = &ccb->ataio.data_ptr;
876 numbufs = min(mapinfo->num_bufs_used, 1);
879 numbufs = min(mapinfo->num_bufs_used, 2);
880 data_ptrs[0] = &ccb->smpio.smp_request;
881 data_ptrs[1] = &ccb->smpio.smp_response;
883 case XPT_DEV_ADVINFO:
884 numbufs = min(mapinfo->num_bufs_used, 1);
885 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
888 /* allow ourselves to be swapped once again */
891 break; /* NOTREACHED */
894 for (i = 0; i < numbufs; i++) {
895 /* Set the user's pointer back to the original value */
896 *data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;
898 /* unmap the buffer */
899 vunmapbuf(mapinfo->bp[i]);
901 /* release the buffer */
902 relpbuf(mapinfo->bp[i], NULL);
905 /* allow ourselves to be swapped once again */
910 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
912 struct ccb_hdr *ccb_h;
914 mtx_assert(periph->sim->mtx, MA_OWNED);
915 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));
917 while (SLIST_FIRST(&periph->ccb_list) == NULL) {
918 if (periph->immediate_priority > priority)
919 periph->immediate_priority = priority;
920 xpt_schedule(periph, priority);
921 if ((SLIST_FIRST(&periph->ccb_list) != NULL)
922 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
924 mtx_assert(periph->sim->mtx, MA_OWNED);
925 mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
929 ccb_h = SLIST_FIRST(&periph->ccb_list);
930 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
931 return ((union ccb *)ccb_h);
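/*
 * The CCB arrives on ccb_list via the driver's start routine: raising
 * immediate_priority and calling xpt_schedule() causes xpt to invoke
 * periph_start() with an allocated CCB, which the start routine is
 * expected to queue on ccb_list and then wake the sleeper with a
 * wakeup() on &periph->ccb_list.
 */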
935 cam_periph_ccbwait(union ccb *ccb)
939 sim = xpt_path_sim(ccb->ccb_h.path);
940 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
941 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
942 mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
946 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
947 int (*error_routine)(union ccb *ccb,
949 u_int32_t sense_flags))
959 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
960 xpt_setup_ccb(&ccb->ccb_h,
962 CAM_PRIORITY_NORMAL);
963 ccb->ccb_h.func_code = XPT_GDEVLIST;
966 * Basically, the point of this is that we walk the list
967 * of devices until we find a passthrough
968 * device. In the current version of the CAM code, the
969 * only way to determine what type of device we're dealing
970 * with is by its name.
974 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
975 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
977 /* we want the next device in the list */
979 if (strncmp(ccb->cgdl.periph_name,
985 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
987 ccb->cgdl.periph_name[0] = '\0';
988 ccb->cgdl.unit_number = 0;
993 /* copy the result back out */
994 bcopy(ccb, addr, sizeof(union ccb));
996 /* and release the ccb */
997 xpt_release_ccb(ccb);
1008 cam_periph_runccb(union ccb *ccb,
1009 int (*error_routine)(union ccb *ccb,
1011 u_int32_t sense_flags),
1012 cam_flags camflags, u_int32_t sense_flags,
1015 struct cam_sim *sim;
1019 sim = xpt_path_sim(ccb->ccb_h.path);
1020 mtx_assert(sim->mtx, MA_OWNED);
1023 * If the user has supplied a stats structure, and if we understand
1024 * this particular type of ccb, record the transaction start.
1026 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
1027 ccb->ccb_h.func_code == XPT_ATA_IO))
1028 devstat_start_transaction(ds, NULL);
1033 cam_periph_ccbwait(ccb);
1034 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1036 else if (error_routine != NULL)
1037 error = (*error_routine)(ccb, camflags, sense_flags);
1041 } while (error == ERESTART);
1043 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1044 cam_release_devq(ccb->ccb_h.path,
1045 /* relsim_flags */0,
1048 /* getcount_only */ FALSE);
1049 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1053 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1054 devstat_end_transaction(ds,
1055 ccb->csio.dxfer_len,
1056 ccb->csio.tag_action & 0x3,
1057 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1058 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1059 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1061 DEVSTAT_READ, NULL, NULL);
1062 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1063 devstat_end_transaction(ds,
1064 ccb->ataio.dxfer_len,
1065 ccb->ataio.tag_action & 0x3,
1066 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1067 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1068 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1070 DEVSTAT_READ, NULL, NULL);
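/*
 * Illustrative (assumed) caller pattern; "fooerror" and
 * "softc->device_stats" are hypothetical names:
 *
 *	error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *
 * runccb issues the CCB, sleeps until it completes, lets the error routine
 * choose between ERESTART (retry) and a final errno, and records devstat
 * data for SCSI and ATA I/O when a stats structure is supplied.
 */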
1078 cam_freeze_devq(struct cam_path *path)
1081 cam_freeze_devq_arg(path, 0, 0);
1085 cam_freeze_devq_arg(struct cam_path *path, uint32_t flags, uint32_t arg)
1087 struct ccb_relsim crs;
1089 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NONE);
1090 crs.ccb_h.func_code = XPT_FREEZE_QUEUE;
1091 crs.release_flags = flags;
1093 crs.release_timeout = arg;
1094 xpt_action((union ccb *)&crs);
1098 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
1099 u_int32_t openings, u_int32_t arg,
1102 struct ccb_relsim crs;
1104 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1105 crs.ccb_h.func_code = XPT_REL_SIMQ;
1106 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1107 crs.release_flags = relsim_flags;
1108 crs.openings = openings;
1109 crs.release_timeout = arg;
1110 xpt_action((union ccb *)&crs);
1111 return (crs.qfrozen_cnt);
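/*
 * cam_release_devq() undoes a device queue freeze; relsim_flags such as
 * RELSIM_RELEASE_AFTER_TIMEOUT or RELSIM_RELEASE_AFTER_CMDCMPLT defer the
 * actual release, and the returned qfrozen_cnt reflects the remaining
 * freeze count.
 */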
1114 #define saved_ccb_ptr ppriv_ptr0
1116 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1118 union ccb *saved_ccb;
1120 struct scsi_start_stop_unit *scsi_cmd;
1122 scsi_cmd = (struct scsi_start_stop_unit *)
1123 &done_ccb->csio.cdb_io.cdb_bytes;
1124 status = done_ccb->ccb_h.status;
1126 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1127 if ((status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
1128 (status & CAM_AUTOSNS_VALID)) {
1129 struct scsi_sense_data *sense;
1130 int error_code, sense_key, asc, ascq, sense_len;
1132 sense = &done_ccb->csio.sense_data;
1133 sense_len = done_ccb->csio.sense_len -
1134 done_ccb->csio.sense_resid;
1135 scsi_extract_sense_len(sense, sense_len, &error_code,
1136 &sense_key, &asc, &ascq, /*show_errors*/ 1);
1138 * If the error is "invalid field in CDB",
1139 * and the load/eject flag is set, turn the
1140 * flag off and try again. This is just in
1141 * case the drive in question barfs on the
1142 * load/eject flag. The CAM code should set
1143 * the load/eject flag by default for
1146 if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1147 ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1148 (asc == 0x24) && (ascq == 0x00)) {
1149 scsi_cmd->how &= ~SSS_LOEJ;
1150 if (status & CAM_DEV_QFRZN) {
1151 cam_release_devq(done_ccb->ccb_h.path,
1153 done_ccb->ccb_h.status &=
1156 xpt_action(done_ccb);
1160 if (cam_periph_error(done_ccb,
1161 0, SF_RETRY_UA | SF_NO_PRINT, NULL) == ERESTART)
1163 if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
1164 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1165 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1169 * If we have successfully taken a device from the not
1170 * ready to ready state, re-scan the device and re-get
1171 * the inquiry information. Many devices (mostly disks)
1172 * don't properly report their inquiry information unless
1175 if (scsi_cmd->opcode == START_STOP_UNIT)
1176 xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
1180 * Perform the final retry with the original CCB so that final
1181 * error processing is performed by the owner of the CCB.
1183 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1184 bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
1185 xpt_free_ccb(saved_ccb);
1186 if (done_ccb->ccb_h.cbfcnp != camperiphdone)
1187 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1188 xpt_action(done_ccb);
1191 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1192 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
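/*
 * The restore above undoes the swap performed in camperiphscsisenseerror():
 * the original CCB was saved in saved_ccb_ptr while the recovery command
 * (START UNIT or TEST UNIT READY) reused the CCB storage, and the final
 * retry is issued with the caller's CCB so that its completion routine
 * performs the normal error handling.
 */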
1196 * Generic Async Event handler. Peripheral drivers usually
1197 * filter out the events that require personal attention,
1198 * and leave the rest to this function.
1201 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1202 struct cam_path *path, void *arg)
1205 case AC_LOST_DEVICE:
1206 cam_periph_invalidate(periph);
1214 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1216 struct ccb_getdevstats cgds;
1218 xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1219 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1220 xpt_action((union ccb *)&cgds);
1221 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1225 cam_periph_freeze_after_event(struct cam_periph *periph,
1226 struct timeval* event_time, u_int duration_ms)
1228 struct timeval delta;
1229 struct timeval duration_tv;
1232 timevalsub(&delta, event_time);
1233 duration_tv.tv_sec = duration_ms / 1000;
1234 duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1235 if (timevalcmp(&delta, &duration_tv, <)) {
1236 timevalsub(&duration_tv, &delta);
1238 duration_ms = duration_tv.tv_sec * 1000;
1239 duration_ms += duration_tv.tv_usec / 1000;
1240 cam_freeze_devq(periph->path);
1241 cam_release_devq(periph->path,
1242 RELSIM_RELEASE_AFTER_TIMEOUT,
1244 /*timeout*/duration_ms,
1245 /*getcount_only*/0);
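/*
 * In other words: if less than duration_ms has elapsed since event_time,
 * freeze the device queue and release it after the remainder of that
 * interval, giving the bus time to settle after the event.
 */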
1251 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
1252 cam_flags camflags, u_int32_t sense_flags,
1253 int *openings, u_int32_t *relsim_flags,
1254 u_int32_t *timeout, int *print, const char **action_string)
1258 switch (ccb->csio.scsi_status) {
1259 case SCSI_STATUS_OK:
1260 case SCSI_STATUS_COND_MET:
1261 case SCSI_STATUS_INTERMED:
1262 case SCSI_STATUS_INTERMED_COND_MET:
1265 case SCSI_STATUS_CMD_TERMINATED:
1266 case SCSI_STATUS_CHECK_COND:
1267 error = camperiphscsisenseerror(ccb, orig_ccb,
1276 case SCSI_STATUS_QUEUE_FULL:
1279 struct ccb_getdevstats cgds;
1282 * First off, find out what the current
1283 * transaction counts are.
1285 xpt_setup_ccb(&cgds.ccb_h,
1287 CAM_PRIORITY_NORMAL);
1288 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1289 xpt_action((union ccb *)&cgds);
1292 * If we were the only transaction active, treat
1293 * the QUEUE FULL as if it were a BUSY condition.
1295 if (cgds.dev_active != 0) {
1299 * Reduce the number of openings to
1300 * be 1 less than the amount it took
1301 * to get a queue full bounded by the
1302 * minimum allowed tag count for this
1305 total_openings = cgds.dev_active + cgds.dev_openings;
1306 *openings = cgds.dev_active;
1307 if (*openings < cgds.mintags)
1308 *openings = cgds.mintags;
1309 if (*openings < total_openings)
1310 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1313 * Some devices report queue full for
1314 * temporary resource shortages. For
1315 * this reason, we allow a minimum
1316 * tag count to be entered via a
1317 * quirk entry to prevent the queue
1318 * count on these devices from falling
1319 * to a pessimistically low value. We
1320 * still wait for the next successful
1321 * completion, however, before queueing
1322 * more transactions to the device.
1324 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1332 case SCSI_STATUS_BUSY:
1334 * Restart the queue after either another
1335 * command completes or a 1 second timeout.
1337 if (ccb->ccb_h.retry_count > 0) {
1338 ccb->ccb_h.retry_count--;
1340 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1341 | RELSIM_RELEASE_AFTER_CMDCMPLT;
1347 case SCSI_STATUS_RESERV_CONFLICT:
1356 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
1357 cam_flags camflags, u_int32_t sense_flags,
1358 int *openings, u_int32_t *relsim_flags,
1359 u_int32_t *timeout, int *print, const char **action_string)
1361 struct cam_periph *periph;
1362 union ccb *orig_ccb = ccb;
1363 int error, recoveryccb;
1365 periph = xpt_path_periph(ccb->ccb_h.path);
1366 recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
1367 if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
1369 * If error recovery is already in progress, don't attempt
1370 * to process this error, but requeue it unconditionally
1371 * and attempt to process it once error recovery has
1372 * completed. This failed command is probably related to
1373 * the error that caused the currently active error recovery
1374 * action so our current recovery efforts should also
1375 * address this command. Be aware that the error recovery
1376 * code assumes that only one recovery action is in progress
1377 * on a particular peripheral instance at any given time
1378 * (e.g. only one saved CCB for error recovery) so it is
1379 * imperative that we don't violate this assumption.
1384 scsi_sense_action err_action;
1385 struct ccb_getdev cgd;
1388 * Grab the inquiry data for this device.
1390 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1391 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1392 xpt_action((union ccb *)&cgd);
1394 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
1395 err_action = scsi_error_action(&ccb->csio,
1399 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1400 error = err_action & SS_ERRMASK;
1403 * Do not autostart sequential access devices
1404 * to avoid unexpected tape loading.
1406 if ((err_action & SS_MASK) == SS_START &&
1407 SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1408 *action_string = "Will not autostart a "
1409 "sequential access device";
1410 goto sense_error_done;
1414 * Avoid recovery recursion if recovery action is the same.
1416 if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
1417 if (((err_action & SS_MASK) == SS_START &&
1418 ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
1419 ((err_action & SS_MASK) == SS_TUR &&
1420 (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
1421 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1422 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1428 * If the recovery action will consume a retry,
1429 * make sure we actually have retries available.
1431 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1432 if (ccb->ccb_h.retry_count > 0 &&
1433 (periph->flags & CAM_PERIPH_INVALID) == 0)
1434 ccb->ccb_h.retry_count--;
1436 *action_string = "Retries exhausted";
1437 goto sense_error_done;
1441 if ((err_action & SS_MASK) >= SS_START) {
1443 * Do common portions of commands that
1444 * use recovery CCBs.
1446 orig_ccb = xpt_alloc_ccb_nowait();
1447 if (orig_ccb == NULL) {
1448 *action_string = "Can't allocate recovery CCB";
1449 goto sense_error_done;
1452 * Clear freeze flag for original request here, as
1453 * this freeze will be dropped as part of ERESTART.
1455 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1456 bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
1459 switch (err_action & SS_MASK) {
1461 *action_string = "No recovery action needed";
1465 *action_string = "Retrying command (per sense data)";
1469 *action_string = "Unretryable error";
1476 * Send a start unit command to the device, and
1477 * then retry the command.
1479 *action_string = "Attempting to start unit";
1480 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1483 * Check for removable media and set
1484 * load/eject flag appropriately.
1486 if (SID_IS_REMOVABLE(&cgd.inq_data))
1491 scsi_start_stop(&ccb->csio,
1505 * Send a Test Unit Ready to the device.
1506 * If the 'many' flag is set, we send 120
1507 * test unit ready commands, one every half
1508 * second. Otherwise, we just send one TUR.
1509 * We only want to do this if the retry
1510 * count has not been exhausted.
1514 if ((err_action & SSQ_MANY) != 0) {
1515 *action_string = "Polling device for readiness";
1518 *action_string = "Testing device for readiness";
1521 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1522 scsi_test_unit_ready(&ccb->csio,
1530 * Accomplish our 500ms delay by deferring
1531 * the release of our device queue appropriately.
1533 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1538 panic("Unhandled error action %x", err_action);
1541 if ((err_action & SS_MASK) >= SS_START) {
1543 * Drop the priority, so that the recovery
1544 * CCB is the first to execute. Freeze the queue
1545 * after this command is sent so that we can
1546 * restore the old csio and have it queued in
1547 * the proper order before we release normal
1548 * transactions to the device.
1550 ccb->ccb_h.pinfo.priority--;
1551 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1552 ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1558 *print = ((err_action & SSQ_PRINT_SENSE) != 0);
1564 * Generic error handler. Peripheral drivers usually filter
1565 * out the errors that they handle in a unique manner, then
1566 * call this function.
1569 cam_periph_error(union ccb *ccb, cam_flags camflags,
1570 u_int32_t sense_flags, union ccb *save_ccb)
1572 union ccb *orig_ccb;
1573 struct cam_periph *periph;
1574 const char *action_string;
1576 int frozen, error, openings, print, lost_device;
1577 u_int32_t relsim_flags, timeout;
1580 periph = xpt_path_periph(ccb->ccb_h.path);
1581 action_string = NULL;
1582 status = ccb->ccb_h.status;
1583 frozen = (status & CAM_DEV_QFRZN) != 0;
1584 status &= CAM_STATUS_MASK;
1585 openings = relsim_flags = timeout = lost_device = 0;
1593 case CAM_SCSI_STATUS_ERROR:
1594 error = camperiphscsistatuserror(ccb, &orig_ccb,
1595 camflags, sense_flags, &openings, &relsim_flags,
1596 &timeout, &print, &action_string);
1598 case CAM_AUTOSENSE_FAIL:
1599 error = EIO; /* we have to kill the command */
1603 case CAM_MSG_REJECT_REC:
1604 /* XXX Don't know that these are correct */
1607 case CAM_SEL_TIMEOUT:
1608 if ((camflags & CAM_RETRY_SELTO) != 0) {
1609 if (ccb->ccb_h.retry_count > 0 &&
1610 (periph->flags & CAM_PERIPH_INVALID) == 0) {
1611 ccb->ccb_h.retry_count--;
1615 * Wait a bit to give the device
1616 * time to recover before we try again.
1618 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1619 timeout = periph_selto_delay;
1622 action_string = "Retries exhausted";
1625 case CAM_DEV_NOT_THERE:
1630 case CAM_REQ_INVALID:
1631 case CAM_PATH_INVALID:
1633 case CAM_PROVIDE_FAIL:
1634 case CAM_REQ_TOO_BIG:
1635 case CAM_LUN_INVALID:
1636 case CAM_TID_INVALID:
1639 case CAM_SCSI_BUS_RESET:
1642 * Commands that repeatedly time out and cause these
1643 * kinds of error recovery actions should return
1644 * CAM_CMD_TIMEOUT, which allows us to safely assume
1645 * that this command was an innocent bystander to
1646 * these events and should be unconditionally
1649 case CAM_REQUEUE_REQ:
1650 /* Unconditional requeue if device is still there */
1651 if (periph->flags & CAM_PERIPH_INVALID) {
1652 action_string = "Periph was invalidated";
1654 } else if (sense_flags & SF_NO_RETRY) {
1656 action_string = "Retry was blocked";
1660 case CAM_RESRC_UNAVAIL:
1661 /* Wait a bit for the resource shortage to abate. */
1662 timeout = periph_noresrc_delay;
1666 /* Wait a bit for the busy condition to abate. */
1667 timeout = periph_busy_delay;
1669 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1671 case CAM_ATA_STATUS_ERROR:
1672 case CAM_REQ_CMP_ERR:
1673 case CAM_CMD_TIMEOUT:
1674 case CAM_UNEXP_BUSFREE:
1675 case CAM_UNCOR_PARITY:
1676 case CAM_DATA_RUN_ERR:
1678 if (periph->flags & CAM_PERIPH_INVALID) {
1680 action_string = "Periph was invalidated";
1681 } else if (ccb->ccb_h.retry_count == 0) {
1683 action_string = "Retries exhausted";
1684 } else if (sense_flags & SF_NO_RETRY) {
1686 action_string = "Retry was blocked";
1688 ccb->ccb_h.retry_count--;
1694 if ((sense_flags & SF_PRINT_ALWAYS) ||
1695 CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
1697 else if (sense_flags & SF_NO_PRINT)
1700 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1701 if (error != 0 && print) {
1702 if (error != ERESTART) {
1703 if (action_string == NULL)
1704 action_string = "Unretryable error";
1705 xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
1706 error, action_string);
1707 } else if (action_string != NULL)
1708 xpt_print(ccb->ccb_h.path, "%s\n", action_string);
1710 xpt_print(ccb->ccb_h.path, "Retrying command\n");
1714 struct cam_path *newpath;
1718 * For a selection timeout, we consider all of the LUNs on
1719 * the target to be gone. If the status is CAM_DEV_NOT_THERE,
1720 * then we only get rid of the device(s) specified by the
1721 * path in the original CCB.
1723 if (status == CAM_DEV_NOT_THERE)
1724 lun_id = xpt_path_lun_id(ccb->ccb_h.path);
1726 lun_id = CAM_LUN_WILDCARD;
1728 /* Should we do more if we can't create the path?? */
1729 if (xpt_create_path(&newpath, periph,
1730 xpt_path_path_id(ccb->ccb_h.path),
1731 xpt_path_target_id(ccb->ccb_h.path),
1732 lun_id) == CAM_REQ_CMP) {
1735 * Let peripheral drivers know that this
1736 * device has gone away.
1738 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1739 xpt_free_path(newpath);
1743 /* Attempt a retry */
1744 if (error == ERESTART || error == 0) {
1746 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1747 if (error == ERESTART)
1750 cam_release_devq(ccb->ccb_h.path,
1754 /*getcount_only*/0);