 * Common functions for CAM "type" (peripheral) drivers.
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/linker_set.h>
#include <sys/mutex.h>
#include <sys/devicestat.h>

#include <vm/vm_extern.h>

#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static u_int camperiphnextunit(struct periph_driver *p_drv,
			       u_int newunit, int wired,
			       path_id_t pathid, target_id_t target,
static u_int camperiphunit(struct periph_driver *p_drv,
			   path_id_t pathid, target_id_t target,
static void camperiphdone(struct cam_periph *periph,
static void camperiphfree(struct cam_periph *periph);
static int camperiphscsistatuserror(union ccb *ccb,
				    u_int32_t sense_flags,
				    u_int32_t *relsim_flags,
static int camperiphscsisenseerror(union ccb *ccb,
				   u_int32_t sense_flags,
				   u_int32_t *relsim_flags,

static int nperiph_drivers;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
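/*
 * These delays are loader tunables.  As a sketch (values in milliseconds,
 * chosen only for illustration), they could be overridden at boot time from
 * /boot/loader.conf:
 *
 *	kern.cam.periph_selto_delay="2000"
 *	kern.cam.periph_noresrc_delay="250"
 *	kern.cam.periph_busy_delay="250"
 */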
periphdriver_register(void *data)
	struct periph_driver **newdrivers, **old;

	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
		free(old, M_CAMPERIPH);
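/*
 * Peripheral drivers normally reach periphdriver_register() through the
 * PERIPHDRIVER_DECLARE() registration macro rather than by calling it
 * directly.  A minimal sketch for a hypothetical "foo" driver (all names
 * illustrative, the last initializer is the generation count):
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */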
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	target_id_t target_id;

	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected\n",
			       periph->periph_name, periph->unit_number);
		return (CAM_REQ_INVALID);

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
		return (CAM_RESRC_UNAVAIL);

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		free(periph, M_CAMPERIPH);
		return (CAM_REQ_INVALID);

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	bzero(periph, sizeof(*periph));
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)

	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL)
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;

	status = periph_ctor(periph, arg);
	if (status == CAM_REQ_CMP)

	switch (init_level) {
		/* Initialized successfully */
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_remove_periph(periph);
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		/* No cleanup to perform. */
		panic("cam_periph_alloc: Unknown init level");
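/*
 * A sketch of how a peripheral driver's async callback typically calls the
 * allocator above when a new device is announced (AC_FOUND_DEVICE); the
 * "foo" names are hypothetical:
 *
 *	struct ccb_getdev *cgd = (struct ccb_getdev *)arg;
 *
 *	status = cam_periph_alloc(fooregister, foooninvalidate, foocleanup,
 *				  foostart, "foo", CAM_PERIPH_BIO,
 *				  cgd->ccb_h.path, fooasync, AC_FOUND_DEVICE,
 *				  cgd);
 *	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *		printf("fooasync: Unable to attach new device\n");
 */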
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.

cam_periph_find(struct cam_path *path, char *name)
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {

cam_periph_acquire(struct cam_periph *periph)
		return(CAM_REQ_CMP_ERR);

cam_periph_release_locked(struct cam_periph *periph)
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);

cam_periph_release(struct cam_periph *periph)
	mtx_assert(sim->mtx, MA_NOTOWNED);
	cam_periph_release_locked(periph);
	mtx_unlock(sim->mtx);

cam_periph_hold(struct cam_periph *periph, int priority)
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)

	mtx_assert(periph->sim->mtx, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
	periph->flags |= CAM_PERIPH_LOCKED;

cam_periph_unhold(struct cam_periph *periph)
	mtx_assert(periph->sim->mtx, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;

	cam_periph_release_locked(periph);
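/*
 * A minimal sketch of the hold/unhold pairing as a peripheral driver might
 * use it around an operation that needs exclusive access (the SIM lock is
 * assumed to be held; error handling is abbreviated):
 *
 *	if (cam_periph_hold(periph, PRIBIO | PCATCH) != 0)
 *		return (ENXIO);
 *	... perform the operation that requires exclusive access ...
 *	cam_periph_unhold(periph);
 */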
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.

camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
	struct cam_periph *periph;
	int i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
		if (periph != NULL && periph->unit_number == newunit) {
				xpt_print(periph->path, "Duplicate Wired "
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,

		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
			if (newunit == dunit)

camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
	int wired, i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {

	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
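/*
 * The wiring entries consulted above normally come from the kernel
 * environment (e.g. /boot/device.hints).  A hypothetical entry pinning a
 * "da" peripheral to unit 4 at a particular bus/target/lun might look like:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */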
cam_periph_invalidate(struct cam_periph *periph)
	 * We only call this routine the first time a peripheral is
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		printf("cam_invalidate_periph: refcount < 0!!\n");

camperiphfree(struct cam_periph *periph)
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
		periph->deferred_callback(NULL, periph->deferred_ac,
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);

 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.

cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
	/* Some controllers may not be able to handle more data. */
	size_t maxmap = DFLTPHYS;

	switch(ccb->ccb_h.func_code) {
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		break; /* NOTREACHED */

	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	for (i = 0; i < numbufs; i++) {
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap) {
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		if (dirs[i] & CAM_DIR_IN) {

	/* this keeps the current process from getting swapped */
	 * XXX KDM should I use P_NOSWAP instead?
	for (i = 0; i < numbufs; i++) {
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < MAXPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		 * Map the buffer into kernel memory.
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			relpbuf(mapinfo->bp[i], NULL);

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;

	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);

 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().

cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */

	switch (ccb->ccb_h.func_code) {
		numbufs = min(mapinfo->num_bufs_used, 2);
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		data_ptrs[0] = &ccb->ataio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		/* allow ourselves to be swapped once again */
		break; /* NOTREACHED */

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);

	/* allow ourselves to be swapped once again */
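/*
 * As a rough sketch (not taken verbatim from any driver), an ioctl-style
 * consumer of cam_periph_mapmem()/cam_periph_unmapmem() typically brackets
 * cam_periph_runccb() with the map/unmap pair; "softc" and "fooerror" are
 * hypothetical names:
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *					  SF_RETRY_UA, softc->device_stats);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */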
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
	struct ccb_hdr *ccb_h;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
		mtx_assert(periph->sim->mtx, MA_OWNED);
		mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
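/*
 * A minimal sketch of how a caller (holding the SIM lock) obtains a CCB with
 * cam_periph_getccb() and returns it when done; the fill-in step depends on
 * the request being built:
 *
 *	ccb = cam_periph_getccb(periph, 1);
 *	... fill in the CCB for the desired request ...
 *	error = cam_periph_runccb(ccb, NULL, 0, 0, NULL);
 *	xpt_release_ccb(ccb);
 */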
cam_periph_ccbwait(union ccb *ccb)
	sim = xpt_path_sim(ccb->ccb_h.path);
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);

cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      u_int32_t sense_flags))
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
		while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
			/* we want the next device in the list */
			if (strncmp(ccb->cgdl.periph_name,
		if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			ccb->cgdl.periph_name[0] = '\0';
			ccb->cgdl.unit_number = 0;

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds, NULL);

		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_READ, NULL, NULL);

cam_freeze_devq(struct cam_path *path)
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);

cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);

#define saved_ccb_ptr ppriv_ptr0

camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
	union ccb *saved_ccb;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	 * Unfreeze the queue once if it is already frozen..
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,

		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;
			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
					xpt_done_ccb = TRUE;
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			 * Grab the inquiry data for this device.
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,

			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				xpt_action(done_ccb);
			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				/* set the timeout to .5 sec */
					RELSIM_RELEASE_AFTER_TIMEOUT;
				xpt_action(done_ccb);

				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));
				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
				xpt_action(done_ccb);

			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));
			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
			xpt_action(done_ccb);

		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
		xpt_action(done_ccb);

	/* decrement the retry count */
	 * XXX This isn't appropriate in all cases.  Restructure,
	 * so that the retry count is only decremented on an
	 * actual retry.  Remember that the original ccb had its
	 * retry count dropped before entering recovery, so
	 * doing it again is a bug.
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/relsim_flags,
					       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);

 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.

cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		cam_periph_bus_settle(periph, scsi_delay);
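/*
 * A sketch of how a peripheral driver commonly hooks into async events and
 * defers the uninteresting ones to cam_periph_async() above ("fooasync" is
 * a hypothetical callback in the driver):
 *
 *	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
 *			   fooasync, periph, periph->path);
 *
 * where fooasync() handles the events it cares about itself and, in its
 * default case, simply calls cam_periph_async(periph, code, path, arg).
 */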
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);

cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
	struct timeval delta;
	struct timeval duration_tv;

	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);
		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				 RELSIM_RELEASE_AFTER_TIMEOUT,
				 /*timeout*/duration_ms,
				 /*getcount_only*/0);

camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
	case SCSI_STATUS_QUEUE_FULL:
		struct ccb_getdevstats cgds;

		 * First off, find out what the current
		 * transaction counts are.
		xpt_setup_ccb(&cgds.ccb_h,
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		if (cgds.dev_active != 0) {
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
				xpt_print(ccb->ccb_h.path, "Queue Full\n");
	case SCSI_STATUS_BUSY:
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
			xpt_print(ccb->ccb_h.path, "Device Busy\n");
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
		xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
			  ccb->csio.scsi_status);

camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb *print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		 * The location of the original ccb
		 * for sense printing purposes.
		 * Grab the inquiry data for this device.
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
		error = err_action & SS_ERRMASK;

		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
				action_string = "Retries Exhausted";
				goto sense_error_done;

		if ((err_action & SS_MASK) >= SS_START) {
			 * Do common portions of commands that
			 * use recovery CCBs.
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

		switch (err_action & SS_MASK) {
			action_string = "No Recovery Action Needed";
			action_string = "Retrying Command (per Sense Data)";
			action_string = "Unretryable error";

			 * Send a start unit command to the device, and
			 * then retry the command.
			action_string = "Attempting to Start Unit";

			 * Check for removable media and set
			 * load/eject flag appropriately.
			if (SID_IS_REMOVABLE(&cgd.inq_data))
			scsi_start_stop(&ccb->csio,

			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				action_string = "Testing device for readiness";
			scsi_test_unit_ready(&ccb->csio,

			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;

			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
			panic("Unhandled error action %x", err_action);

		if ((err_action & SS_MASK) >= SS_START) {
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;

		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
				scsi_sense_print(&print_ccb->csio);
			printf("%s\n", action_string);
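/*
 * A sketch of the error routine a peripheral driver typically supplies to
 * cam_periph_runccb(); it filters driver-specific cases itself and hands
 * everything else to cam_periph_error() below ("foo" and "softc" are
 * hypothetical names):
 *
 *	static int
 *	fooerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		struct cam_periph *periph;
 *		struct foo_softc *softc;
 *
 *		periph = xpt_path_periph(ccb->ccb_h.path);
 *		softc = (struct foo_softc *)periph->softc;
 *		return (cam_periph_error(ccb, cam_flags, sense_flags,
 *					 &softc->saved_ccb));
 *	}
 */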
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.

cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
	const char *action_string;
	int error, printed = 0;
	u_int32_t relsim_flags;
	u_int32_t timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
	case CAM_ATA_STATUS_ERROR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_ATA_STATUS_ERROR\n");
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected Parity Error\n");
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data Overrun\n");
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			action_string = "Retries Exhausted";
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
	case CAM_SEL_TIMEOUT:
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {
				ccb->ccb_h.retry_count--;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection Timeout\n");

				 * Wait a bit to give the device
				 * time to recover before we try again.
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)

		 * Let peripheral drivers know that this device has gone
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
	case CAM_SCSI_BUS_RESET:
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				printf("Bus Device Reset sent\n");
				printf("Bus Reset issued\n");
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request Requeued\n");
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* Wait a bit for the busy condition to abate. */
		timeout = periph_busy_delay;
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
			action_string = "Retries Exhausted";

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART) {
			action_string = "Retrying Command";
			cam_release_devq(ccb->ccb_h.path,
					 /*getcount_only*/0);

	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print(ccb->ccb_h.path, "error %d\n", error);
		xpt_print(ccb->ccb_h.path, "%s\n", action_string);