2 * Common functions for CAM "type" (peripheral) drivers.
4 * Copyright (c) 1997, 1998 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/malloc.h>
37 #include <sys/kernel.h>
40 #include <sys/mutex.h>
43 #include <sys/devicestat.h>
47 #include <vm/vm_extern.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_queue.h>
52 #include <cam/cam_xpt_periph.h>
53 #include <cam/cam_periph.h>
54 #include <cam/cam_debug.h>
55 #include <cam/cam_sim.h>
57 #include <cam/scsi/scsi_all.h>
58 #include <cam/scsi/scsi_message.h>
59 #include <cam/scsi/scsi_pass.h>
61 static u_int camperiphnextunit(struct periph_driver *p_drv,
62 u_int newunit, int wired,
63 path_id_t pathid, target_id_t target,
65 static u_int camperiphunit(struct periph_driver *p_drv,
66 path_id_t pathid, target_id_t target,
68 static void camperiphdone(struct cam_periph *periph,
70 static void camperiphfree(struct cam_periph *periph);
71 static int camperiphscsistatuserror(union ccb *ccb,
74 u_int32_t sense_flags,
76 u_int32_t *relsim_flags,
79 const char **action_string);
80 static int camperiphscsisenseerror(union ccb *ccb,
83 u_int32_t sense_flags,
85 u_int32_t *relsim_flags,
88 const char **action_string);
89 static void cam_periph_devctl_notify(union ccb *ccb);
91 static int nperiph_drivers;
92 static int initialized = 0;
93 struct periph_driver **periph_drivers;
95 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
97 static int periph_selto_delay = 1000;
98 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
99 static int periph_noresrc_delay = 500;
100 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
101 static int periph_busy_delay = 500;
102 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
106 periphdriver_register(void *data)
108 struct periph_driver *drv = (struct periph_driver *)data;
109 struct periph_driver **newdrivers, **old;
113 ndrivers = nperiph_drivers + 2;
114 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
117 if (ndrivers != nperiph_drivers + 2) {
119 * Lost race against itself; go around.
122 free(newdrivers, M_CAMPERIPH);
126 bcopy(periph_drivers, newdrivers,
127 sizeof(*newdrivers) * nperiph_drivers);
128 newdrivers[nperiph_drivers] = drv;
129 newdrivers[nperiph_drivers + 1] = NULL;
130 old = periph_drivers;
131 periph_drivers = newdrivers;
135 free(old, M_CAMPERIPH);
136 	/* If the driver is marked as early, or we are already at the late stage, initialize it now. */
137 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
143 periphdriver_init(int level)
147 initialized = max(initialized, level);
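	/*
	 * Two-pass initialization: drivers flagged CAM_PERIPH_DRV_EARLY run
	 * at level 1, all remaining drivers at level 2; each pass below
	 * invokes only the init routines that match the current level.
	 */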
148 for (i = 0; periph_drivers[i] != NULL; i++) {
149 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
150 if (early == initialized)
151 (*periph_drivers[i]->init)();
156 cam_periph_alloc(periph_ctor_t *periph_ctor,
157 periph_oninv_t *periph_oninvalidate,
158 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
159 char *name, cam_periph_type type, struct cam_path *path,
160 ac_callback_t *ac_callback, ac_code code, void *arg)
162 struct periph_driver **p_drv;
164 struct cam_periph *periph;
165 struct cam_periph *cur_periph;
167 target_id_t target_id;
174 * Handle Hot-Plug scenarios. If there is already a peripheral
175 * of our type assigned to this path, we are likely waiting for
176  * final close on an old, invalidated peripheral. If this is
177 * the case, queue up a deferred call to the peripheral's async
178 * handler. If it looks like a mistaken re-allocation, complain.
180 if ((periph = cam_periph_find(path, name)) != NULL) {
182 if ((periph->flags & CAM_PERIPH_INVALID) != 0
183 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
184 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
185 periph->deferred_callback = ac_callback;
186 periph->deferred_ac = code;
187 return (CAM_REQ_INPROG);
189 printf("cam_periph_alloc: attempt to re-allocate "
190 "valid device %s%d rejected flags %#x "
191 "refcount %d\n", periph->periph_name,
192 periph->unit_number, periph->flags,
195 return (CAM_REQ_INVALID);
198 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
202 return (CAM_RESRC_UNAVAIL);
207 sim = xpt_path_sim(path);
208 path_id = xpt_path_path_id(path);
209 target_id = xpt_path_target_id(path);
210 lun_id = xpt_path_lun_id(path);
211 periph->periph_start = periph_start;
212 periph->periph_dtor = periph_dtor;
213 periph->periph_oninval = periph_oninvalidate;
215 periph->periph_name = name;
216 periph->scheduled_priority = CAM_PRIORITY_NONE;
217 periph->immediate_priority = CAM_PRIORITY_NONE;
218 periph->refcount = 1; /* Dropped by invalidation. */
220 SLIST_INIT(&periph->ccb_list);
221 status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
222 if (status != CAM_REQ_CMP)
227 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
228 if (strcmp((*p_drv)->driver_name, name) == 0)
231 if (*p_drv == NULL) {
232 printf("cam_periph_alloc: invalid periph name '%s'\n", name);
234 xpt_free_path(periph->path);
235 free(periph, M_CAMPERIPH);
236 return (CAM_REQ_INVALID);
238 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
239 cur_periph = TAILQ_FIRST(&(*p_drv)->units);
240 while (cur_periph != NULL
241 && cur_periph->unit_number < periph->unit_number)
242 cur_periph = TAILQ_NEXT(cur_periph, unit_links);
243 if (cur_periph != NULL) {
244 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
245 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
247 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
248 (*p_drv)->generation++;
254 status = xpt_add_periph(periph);
255 if (status != CAM_REQ_CMP)
259 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
261 status = periph_ctor(periph, arg);
263 if (status == CAM_REQ_CMP)
267 switch (init_level) {
269 /* Initialized successfully */
272 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
273 xpt_remove_periph(periph);
277 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
279 xpt_free_path(periph->path);
282 free(periph, M_CAMPERIPH);
285 /* No cleanup to perform. */
288 panic("%s: Unknown init level", __func__);
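/*
 * Typical cam_periph_alloc() use (a sketch; the "xx" names are
 * hypothetical): a peripheral driver's async callback reacts to
 * AC_FOUND_DEVICE by allocating an instance for the newly found path:
 *
 *	status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
 *	    xxstart, "xx", CAM_PERIPH_BIO, path, xxasync,
 *	    AC_FOUND_DEVICE, cgd);
 *
 * CAM_REQ_CMP and CAM_REQ_INPROG indicate success or a deferred attach,
 * respectively; anything else is an error.
 */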
294 * Find a peripheral structure with the specified path, target, lun,
295  * and (optionally) driver name. If the name is NULL, this function will return
296 * the first peripheral driver that matches the specified path.
299 cam_periph_find(struct cam_path *path, char *name)
301 struct periph_driver **p_drv;
302 struct cam_periph *periph;
305 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
307 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
310 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
311 if (xpt_path_comp(periph->path, path) == 0) {
313 cam_periph_assert(periph, MA_OWNED);
327 * Find peripheral driver instances attached to the specified path.
330 cam_periph_list(struct cam_path *path, struct sbuf *sb)
332 struct sbuf local_sb;
333 struct periph_driver **p_drv;
334 struct cam_periph *periph;
340 sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
343 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
345 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
346 if (xpt_path_comp(periph->path, path) != 0)
349 if (sbuf_len(&local_sb) != 0)
350 sbuf_cat(&local_sb, ",");
352 sbuf_printf(&local_sb, "%s%d", periph->periph_name,
353 periph->unit_number);
355 if (sbuf_error(&local_sb) == ENOMEM) {
358 sbuf_delete(&local_sb);
365 sbuf_finish(&local_sb);
366 sbuf_cpy(sb, sbuf_data(&local_sb));
367 sbuf_delete(&local_sb);
372 cam_periph_acquire(struct cam_periph *periph)
376 status = CAM_REQ_CMP_ERR;
381 if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
383 status = CAM_REQ_CMP;
391 cam_periph_doacquire(struct cam_periph *periph)
395 KASSERT(periph->refcount >= 1,
396 ("cam_periph_doacquire() with refcount == %d", periph->refcount));
402 cam_periph_release_locked_buses(struct cam_periph *periph)
405 cam_periph_assert(periph, MA_OWNED);
406 KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
407 if (--periph->refcount == 0)
408 camperiphfree(periph);
412 cam_periph_release_locked(struct cam_periph *periph)
419 cam_periph_release_locked_buses(periph);
424 cam_periph_release(struct cam_periph *periph)
431 cam_periph_assert(periph, MA_NOTOWNED);
432 mtx = cam_periph_mtx(periph);
434 cam_periph_release_locked(periph);
439 cam_periph_hold(struct cam_periph *periph, int priority)
444 * Increment the reference count on the peripheral
445 * while we wait for our lock attempt to succeed
446 * to ensure the peripheral doesn't disappear out
447  * from under us while we sleep.
450 if (cam_periph_acquire(periph) != CAM_REQ_CMP)
453 cam_periph_assert(periph, MA_OWNED);
454 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
455 periph->flags |= CAM_PERIPH_LOCK_WANTED;
456 if ((error = cam_periph_sleep(periph, periph, priority,
457 "caplck", 0)) != 0) {
458 cam_periph_release_locked(periph);
461 if (periph->flags & CAM_PERIPH_INVALID) {
462 cam_periph_release_locked(periph);
467 periph->flags |= CAM_PERIPH_LOCKED;
472 cam_periph_unhold(struct cam_periph *periph)
475 cam_periph_assert(periph, MA_OWNED);
477 periph->flags &= ~CAM_PERIPH_LOCKED;
478 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
479 periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
483 cam_periph_release_locked(periph);
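/*
 * A minimal hold/unhold sketch (assuming the caller already holds the
 * periph lock, as in a typical open routine):
 *
 *	if ((error = cam_periph_hold(periph, PRIBIO)) != 0)
 *		return (error);
 *	... perform serialized configuration work ...
 *	cam_periph_unhold(periph);
 */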
487 * Look for the next unit number that is not currently in use for this
488 * peripheral type starting at "newunit". Also exclude unit numbers that
489  * are reserved for future "hardwiring" unless we already know that this
490 * is a potential wired device. Only assume that the device is "wired" the
491 * first time through the loop since after that we'll be looking at unit
492 * numbers that did not match a wiring entry.
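/*
 * Wired-down ("hardwired") units come from the hints mechanism read via
 * the resource_*_value() calls below.  For example, the "da 4 target 5"
 * style wiring discussed in the comments below would normally be
 * expressed in /boot/device.hints as (illustrative values):
 *
 *	hint.da.4.at="scbus0"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */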
495 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
496 path_id_t pathid, target_id_t target, lun_id_t lun)
498 struct cam_periph *periph;
500 int i, val, dunit, r;
501 const char *dname, *strval;
503 periph_name = p_drv->driver_name;
506 for (periph = TAILQ_FIRST(&p_drv->units);
507 periph != NULL && periph->unit_number != newunit;
508 periph = TAILQ_NEXT(periph, unit_links))
511 if (periph != NULL && periph->unit_number == newunit) {
513 xpt_print(periph->path, "Duplicate Wired "
515 xpt_print(periph->path, "Second device (%s "
516 "device at scbus%d target %d lun %d) will "
517 "not be wired\n", periph_name, pathid,
527 * Don't match entries like "da 4" as a wired down
528 * device, but do match entries like "da 4 target 5"
529 * or even "da 4 scbus 1".
534 r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
537 /* if no "target" and no specific scbus, skip */
538 if (resource_int_value(dname, dunit, "target", &val) &&
539 (resource_string_value(dname, dunit, "at", &strval) ||
540 strcmp(strval, "scbus") == 0))
542 if (newunit == dunit)
552 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
553 target_id_t target, lun_id_t lun)
556 int wired, i, val, dunit;
557 const char *dname, *strval;
558 char pathbuf[32], *periph_name;
560 periph_name = p_drv->driver_name;
561 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
565 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
567 if (resource_string_value(dname, dunit, "at", &strval) == 0) {
568 if (strcmp(strval, pathbuf) != 0)
572 if (resource_int_value(dname, dunit, "target", &val) == 0) {
577 if (resource_int_value(dname, dunit, "lun", &val) == 0) {
589 * Either start from 0 looking for the next unit or from
590 * the unit number given in the resource config. This way,
591  * if we have wildcard matches, we don't return the same unit number twice.
594 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
600 cam_periph_invalidate(struct cam_periph *periph)
603 cam_periph_assert(periph, MA_OWNED);
605  * We only call this routine the first time a peripheral is invalidated.
608 if ((periph->flags & CAM_PERIPH_INVALID) != 0)
611 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
612 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
613 xpt_denounce_periph(periph);
614 periph->flags |= CAM_PERIPH_INVALID;
615 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
616 if (periph->periph_oninval != NULL)
617 periph->periph_oninval(periph);
618 cam_periph_release_locked(periph);
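	/*
	 * The cam_periph_release_locked() call above drops the reference
	 * taken at allocation time; once the last outstanding reference is
	 * released, camperiphfree() below tears the instance down.
	 */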
622 camperiphfree(struct cam_periph *periph)
624 struct periph_driver **p_drv;
625 struct periph_driver *drv;
627 cam_periph_assert(periph, MA_OWNED);
628 KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
629 periph->periph_name, periph->unit_number));
630 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
631 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
634 if (*p_drv == NULL) {
635 printf("camperiphfree: attempt to free non-existant periph\n");
639 * Cache a pointer to the periph_driver structure. If a
640 * periph_driver is added or removed from the array (see
641  * periphdriver_register()) while we drop the topology lock
642 * below, p_drv may change. This doesn't protect against this
643 * particular periph_driver going away. That will require full
644 * reference counting in the periph_driver infrastructure.
649 * We need to set this flag before dropping the topology lock, to
650  * let anyone who is traversing the list know that this peripheral is
651  * about to be freed, and that there will be no more reference count checks.
654 periph->flags |= CAM_PERIPH_FREE;
657 * The peripheral destructor semantics dictate calling with only the
658 * SIM mutex held. Since it might sleep, it should not be called
659 * with the topology lock held.
664 * We need to call the peripheral destructor prior to removing the
665 * peripheral from the list. Otherwise, we risk running into a
666 * scenario where the peripheral unit number may get reused
667 * (because it has been removed from the list), but some resources
668 * used by the peripheral are still hanging around. In particular,
669 * the devfs nodes used by some peripherals like the pass(4) driver
670 * aren't fully cleaned up until the destructor is run. If the
671 * unit number is reused before the devfs instance is fully gone,
674 if (periph->periph_dtor != NULL)
675 periph->periph_dtor(periph);
678 * The peripheral list is protected by the topology lock.
682 TAILQ_REMOVE(&drv->units, periph, unit_links);
685 xpt_remove_periph(periph);
688 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
689 xpt_print(periph->path, "Periph destroyed\n");
691 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
693 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
697 switch (periph->deferred_ac) {
698 case AC_FOUND_DEVICE:
699 ccb.ccb_h.func_code = XPT_GDEV_TYPE;
700 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
704 case AC_PATH_REGISTERED:
705 ccb.ccb_h.func_code = XPT_PATH_INQ;
706 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
714 periph->deferred_callback(NULL, periph->deferred_ac,
717 xpt_free_path(periph->path);
718 free(periph, M_CAMPERIPH);
723 * Map user virtual pointers into kernel virtual address space, so we can
724 * access the memory. This is now a generic function that centralizes most
725 * of the sanity checks on the data flags, if any.
726 * This also only works for up to MAXPHYS memory. Since we use
727 * buffers to map stuff in and out, we're limited to the buffer size.
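/*
 * Callers that accept user-supplied CCBs (pass(4)-style ioctl handlers,
 * for instance) are expected to bracket the I/O with a map/unmap pair;
 * a minimal sketch, with "maxmap" and "error_routine" standing in for
 * caller-specific values:
 *
 *	if ((error = cam_periph_mapmem(ccb, &mapinfo, maxmap)) != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, error_routine, 0, 0, NULL);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 */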
730 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
734 int flags[CAM_PERIPH_MAXMAPS];
735 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
736 u_int32_t lengths[CAM_PERIPH_MAXMAPS];
737 u_int32_t dirs[CAM_PERIPH_MAXMAPS];
740 maxmap = DFLTPHYS; /* traditional default */
741 else if (maxmap > MAXPHYS)
742 maxmap = MAXPHYS; /* for safety */
743 switch(ccb->ccb_h.func_code) {
745 if (ccb->cdm.match_buf_len == 0) {
746 printf("cam_periph_mapmem: invalid match buffer "
750 if (ccb->cdm.pattern_buf_len > 0) {
751 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
752 lengths[0] = ccb->cdm.pattern_buf_len;
753 dirs[0] = CAM_DIR_OUT;
754 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
755 lengths[1] = ccb->cdm.match_buf_len;
756 dirs[1] = CAM_DIR_IN;
759 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
760 lengths[0] = ccb->cdm.match_buf_len;
761 dirs[0] = CAM_DIR_IN;
765  * This request will not go to the hardware, so there is no reason
766 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
771 case XPT_CONT_TARGET_IO:
772 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
774 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
776 data_ptrs[0] = &ccb->csio.data_ptr;
777 lengths[0] = ccb->csio.dxfer_len;
778 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
782 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
784 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
786 data_ptrs[0] = &ccb->ataio.data_ptr;
787 lengths[0] = ccb->ataio.dxfer_len;
788 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
792 data_ptrs[0] = &ccb->smpio.smp_request;
793 lengths[0] = ccb->smpio.smp_request_len;
794 dirs[0] = CAM_DIR_OUT;
795 data_ptrs[1] = &ccb->smpio.smp_response;
796 lengths[1] = ccb->smpio.smp_response_len;
797 dirs[1] = CAM_DIR_IN;
800 case XPT_DEV_ADVINFO:
801 if (ccb->cdai.bufsiz == 0)
804 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
805 lengths[0] = ccb->cdai.bufsiz;
806 dirs[0] = CAM_DIR_IN;
810  * This request will not go to the hardware, so there is no reason
811 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
817 break; /* NOTREACHED */
821 * Check the transfer length and permissions first, so we don't
822 * have to unmap any previously mapped buffers.
824 for (i = 0; i < numbufs; i++) {
829 * The userland data pointer passed in may not be page
830 * aligned. vmapbuf() truncates the address to a page
831 * boundary, so if the address isn't page aligned, we'll
832 * need enough space for the given transfer length, plus
833 * whatever extra space is necessary to make it to the page
837 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap) {
838 printf("cam_periph_mapmem: attempt to map %lu bytes, "
839 "which is greater than %lu\n",
841 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
846 if (dirs[i] & CAM_DIR_OUT) {
847 flags[i] = BIO_WRITE;
850 if (dirs[i] & CAM_DIR_IN) {
857  * This keeps the kernel stack of the current thread from getting
858 * swapped. In low-memory situations where the kernel stack might
859 * otherwise get swapped out, this holds it and allows the thread
860 * to make progress and release the kernel mapped pages sooner.
862 * XXX KDM should I use P_NOSWAP instead?
866 for (i = 0; i < numbufs; i++) {
870 mapinfo->bp[i] = getpbuf(NULL);
872 /* save the buffer's data address */
873 mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;
875 /* put our pointer in the data slot */
876 mapinfo->bp[i]->b_data = *data_ptrs[i];
878 /* set the transfer length, we know it's < MAXPHYS */
879 mapinfo->bp[i]->b_bufsize = lengths[i];
881 /* set the direction */
882 mapinfo->bp[i]->b_iocmd = flags[i];
885 * Map the buffer into kernel memory.
887 * Note that useracc() alone is not a sufficient test.
888 * vmapbuf() can still fail due to a smaller file mapped
889 * into a larger area of VM, or if userland races against
890 * vmapbuf() after the useracc() check.
892 if (vmapbuf(mapinfo->bp[i], 1) < 0) {
893 for (j = 0; j < i; ++j) {
894 *data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
895 vunmapbuf(mapinfo->bp[j]);
896 relpbuf(mapinfo->bp[j], NULL);
898 relpbuf(mapinfo->bp[i], NULL);
903 /* set our pointer to the new mapped area */
904 *data_ptrs[i] = mapinfo->bp[i]->b_data;
906 mapinfo->num_bufs_used++;
910  * Now that we've gotten this far, change ownership of the buffers
911  * to the kernel so that we don't run afoul of returning to user
912 * space with locks (on the buffer) held.
914 for (i = 0; i < numbufs; i++) {
915 BUF_KERNPROC(mapinfo->bp[i]);
923 * Unmap memory segments mapped into kernel virtual address space by
924 * cam_periph_mapmem().
927 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
930 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
932 if (mapinfo->num_bufs_used <= 0) {
933 /* nothing to free and the process wasn't held. */
937 switch (ccb->ccb_h.func_code) {
939 numbufs = min(mapinfo->num_bufs_used, 2);
942 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
944 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
945 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
949 case XPT_CONT_TARGET_IO:
950 data_ptrs[0] = &ccb->csio.data_ptr;
951 numbufs = min(mapinfo->num_bufs_used, 1);
954 data_ptrs[0] = &ccb->ataio.data_ptr;
955 numbufs = min(mapinfo->num_bufs_used, 1);
958 numbufs = min(mapinfo->num_bufs_used, 2);
959 data_ptrs[0] = &ccb->smpio.smp_request;
960 data_ptrs[1] = &ccb->smpio.smp_response;
962 case XPT_DEV_ADVINFO:
963 numbufs = min(mapinfo->num_bufs_used, 1);
964 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
967 /* allow ourselves to be swapped once again */
970 break; /* NOTREACHED */
973 for (i = 0; i < numbufs; i++) {
974 /* Set the user's pointer back to the original value */
975 *data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;
977 /* unmap the buffer */
978 vunmapbuf(mapinfo->bp[i]);
980 /* release the buffer */
981 relpbuf(mapinfo->bp[i], NULL);
984 /* allow ourselves to be swapped once again */
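/*
 * cam_periph_ccbwait() sleeps until the CCB has been dequeued and has
 * left the CAM_REQ_INPROG state; cam_periph_done() below posts the
 * wakeup on the CCB's cbfcnp address.
 */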
989 cam_periph_ccbwait(union ccb *ccb)
992 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
993 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
994 xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, PRIBIO,
999 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
1000 int (*error_routine)(union ccb *ccb,
1002 u_int32_t sense_flags))
1011 case CAMGETPASSTHRU:
1012 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1013 xpt_setup_ccb(&ccb->ccb_h,
1015 CAM_PRIORITY_NORMAL);
1016 ccb->ccb_h.func_code = XPT_GDEVLIST;
1019 * Basically, the point of this is that we go through
1020 * getting the list of devices, until we find a passthrough
1021 * device. In the current version of the CAM code, the
1022 * only way to determine what type of device we're dealing
1023 * with is by its name.
1025 while (found == 0) {
1026 ccb->cgdl.index = 0;
1027 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
1028 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
1030 /* we want the next device in the list */
1032 if (strncmp(ccb->cgdl.periph_name,
1038 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
1040 ccb->cgdl.periph_name[0] = '\0';
1041 ccb->cgdl.unit_number = 0;
1046 /* copy the result back out */
1047 bcopy(ccb, addr, sizeof(union ccb));
1049 /* and release the ccb */
1050 xpt_release_ccb(ccb);
1061 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
1064 /* Caller will release the CCB */
1065 wakeup(&done_ccb->ccb_h.cbfcnp);
1069 cam_periph_runccb(union ccb *ccb,
1070 int (*error_routine)(union ccb *ccb,
1072 u_int32_t sense_flags),
1073 cam_flags camflags, u_int32_t sense_flags,
1076 struct bintime *starttime;
1077 struct bintime ltime;
1081 xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
1084 * If the user has supplied a stats structure, and if we understand
1085 * this particular type of ccb, record the transaction start.
1087 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
1088 ccb->ccb_h.func_code == XPT_ATA_IO)) {
1090 binuptime(starttime);
1091 devstat_start_transaction(ds, starttime);
1094 ccb->ccb_h.cbfcnp = cam_periph_done;
1098 cam_periph_ccbwait(ccb);
1099 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1101 else if (error_routine != NULL)
1102 error = (*error_routine)(ccb, camflags, sense_flags);
1106 } while (error == ERESTART);
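	/*
	 * An ERESTART return from the error routine means the request
	 * should be resubmitted, so the loop above issues the CCB again
	 * and waits for another completion.
	 */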
1108 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1109 cam_release_devq(ccb->ccb_h.path,
1110 /* relsim_flags */0,
1113 /* getcount_only */ FALSE);
1114 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1118 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1119 devstat_end_transaction(ds,
1120 ccb->csio.dxfer_len - ccb->csio.resid,
1121 ccb->csio.tag_action & 0x3,
1122 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1123 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1124 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1126 DEVSTAT_READ, NULL, starttime);
1127 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1128 devstat_end_transaction(ds,
1129 ccb->ataio.dxfer_len - ccb->ataio.resid,
1130 ccb->ataio.tag_action & 0x3,
1131 ((ccb->ccb_h.flags & CAM_DIR_MASK) ==
1132 CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
1133 (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1135 DEVSTAT_READ, NULL, starttime);
1143 cam_freeze_devq(struct cam_path *path)
1145 struct ccb_hdr ccb_h;
1147 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
1148 xpt_setup_ccb(&ccb_h, path, /*priority*/1);
1149 ccb_h.func_code = XPT_NOOP;
1150 ccb_h.flags = CAM_DEV_QFREEZE;
1151 xpt_action((union ccb *)&ccb_h);
1155 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
1156 u_int32_t openings, u_int32_t arg,
1159 struct ccb_relsim crs;
1161 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
1162 relsim_flags, openings, arg, getcount_only));
1163 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1164 crs.ccb_h.func_code = XPT_REL_SIMQ;
1165 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1166 crs.release_flags = relsim_flags;
1167 crs.openings = openings;
1168 crs.release_timeout = arg;
1169 xpt_action((union ccb *)&crs);
1170 return (crs.qfrozen_cnt);
1173 #define saved_ccb_ptr ppriv_ptr0
1175 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1177 union ccb *saved_ccb;
1179 struct scsi_start_stop_unit *scsi_cmd;
1180 int error_code, sense_key, asc, ascq;
1182 scsi_cmd = (struct scsi_start_stop_unit *)
1183 &done_ccb->csio.cdb_io.cdb_bytes;
1184 status = done_ccb->ccb_h.status;
1186 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1187 if (scsi_extract_sense_ccb(done_ccb,
1188 &error_code, &sense_key, &asc, &ascq)) {
1190 * If the error is "invalid field in CDB",
1191 * and the load/eject flag is set, turn the
1192 * flag off and try again. This is just in
1193 * case the drive in question barfs on the
1194  * load/eject flag. The CAM code should set
1195  * the load/eject flag by default for removable media.
1198 if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1199 ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1200 (asc == 0x24) && (ascq == 0x00)) {
1201 scsi_cmd->how &= ~SSS_LOEJ;
1202 if (status & CAM_DEV_QFRZN) {
1203 cam_release_devq(done_ccb->ccb_h.path,
1205 done_ccb->ccb_h.status &=
1208 xpt_action(done_ccb);
1212 if (cam_periph_error(done_ccb,
1213 0, SF_RETRY_UA | SF_NO_PRINT, NULL) == ERESTART)
1215 if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
1216 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1217 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1221 * If we have successfully taken a device from the not
1222 * ready to ready state, re-scan the device and re-get
1223 * the inquiry information. Many devices (mostly disks)
1224  * don't properly report their inquiry information unless they are spun up.
1227 if (scsi_cmd->opcode == START_STOP_UNIT)
1228 xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
1232 * Perform the final retry with the original CCB so that final
1233 * error processing is performed by the owner of the CCB.
1235 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1236 bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
1237 xpt_free_ccb(saved_ccb);
1238 if (done_ccb->ccb_h.cbfcnp != camperiphdone)
1239 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1240 xpt_action(done_ccb);
1243 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1244 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1248 * Generic Async Event handler. Peripheral drivers usually
1249 * filter out the events that require personal attention,
1250 * and leave the rest to this function.
1253 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1254 struct cam_path *path, void *arg)
1257 case AC_LOST_DEVICE:
1258 cam_periph_invalidate(periph);
1266 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1268 struct ccb_getdevstats cgds;
1270 xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1271 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1272 xpt_action((union ccb *)&cgds);
1273 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1277 cam_periph_freeze_after_event(struct cam_periph *periph,
1278 struct timeval* event_time, u_int duration_ms)
1280 struct timeval delta;
1281 struct timeval duration_tv;
1283 if (!timevalisset(event_time))
1287 timevalsub(&delta, event_time);
1288 duration_tv.tv_sec = duration_ms / 1000;
1289 duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1290 if (timevalcmp(&delta, &duration_tv, <)) {
1291 timevalsub(&duration_tv, &delta);
1293 duration_ms = duration_tv.tv_sec * 1000;
1294 duration_ms += duration_tv.tv_usec / 1000;
1295 cam_freeze_devq(periph->path);
1296 cam_release_devq(periph->path,
1297 RELSIM_RELEASE_AFTER_TIMEOUT,
1299 /*timeout*/duration_ms,
1300 /*getcount_only*/0);
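/*
 * Worked example for cam_periph_freeze_after_event(): with a duration_ms
 * of 1000 and an event that happened 300ms ago, delta is ~300ms, so the
 * device queue is frozen and then released after the remaining ~700ms.
 */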
1306 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
1307 cam_flags camflags, u_int32_t sense_flags,
1308 int *openings, u_int32_t *relsim_flags,
1309 u_int32_t *timeout, u_int32_t *action, const char **action_string)
1313 switch (ccb->csio.scsi_status) {
1314 case SCSI_STATUS_OK:
1315 case SCSI_STATUS_COND_MET:
1316 case SCSI_STATUS_INTERMED:
1317 case SCSI_STATUS_INTERMED_COND_MET:
1320 case SCSI_STATUS_CMD_TERMINATED:
1321 case SCSI_STATUS_CHECK_COND:
1322 error = camperiphscsisenseerror(ccb, orig_ccb,
1331 case SCSI_STATUS_QUEUE_FULL:
1334 struct ccb_getdevstats cgds;
1337 * First off, find out what the current
1338 * transaction counts are.
1340 xpt_setup_ccb(&cgds.ccb_h,
1342 CAM_PRIORITY_NORMAL);
1343 cgds.ccb_h.func_code = XPT_GDEV_STATS;
1344 xpt_action((union ccb *)&cgds);
1347 * If we were the only transaction active, treat
1348 * the QUEUE FULL as if it were a BUSY condition.
1350 if (cgds.dev_active != 0) {
1354 * Reduce the number of openings to
1355 * be 1 less than the amount it took
1356 * to get a queue full bounded by the
1357 * minimum allowed tag count for this
1360 total_openings = cgds.dev_active + cgds.dev_openings;
1361 *openings = cgds.dev_active;
1362 if (*openings < cgds.mintags)
1363 *openings = cgds.mintags;
1364 if (*openings < total_openings)
1365 *relsim_flags = RELSIM_ADJUST_OPENINGS;
1368 * Some devices report queue full for
1369 * temporary resource shortages. For
1370 * this reason, we allow a minimum
1371 * tag count to be entered via a
1372 * quirk entry to prevent the queue
1373 * count on these devices from falling
1374  * to a pessimistically low value. We
1375 * still wait for the next successful
1376 * completion, however, before queueing
1377 * more transactions to the device.
1379 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1383 *action &= ~SSQ_PRINT_SENSE;
1388 case SCSI_STATUS_BUSY:
1390 * Restart the queue after either another
1391 * command completes or a 1 second timeout.
1393 if ((sense_flags & SF_RETRY_BUSY) != 0 ||
1394 (ccb->ccb_h.retry_count--) > 0) {
1396 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1397 | RELSIM_RELEASE_AFTER_CMDCMPLT;
1403 case SCSI_STATUS_RESERV_CONFLICT:
1412 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
1413 cam_flags camflags, u_int32_t sense_flags,
1414 int *openings, u_int32_t *relsim_flags,
1415 u_int32_t *timeout, u_int32_t *action, const char **action_string)
1417 struct cam_periph *periph;
1418 union ccb *orig_ccb = ccb;
1419 int error, recoveryccb;
1421 periph = xpt_path_periph(ccb->ccb_h.path);
1422 recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
1423 if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
1425 * If error recovery is already in progress, don't attempt
1426 * to process this error, but requeue it unconditionally
1427 * and attempt to process it once error recovery has
1428 * completed. This failed command is probably related to
1429 * the error that caused the currently active error recovery
1430 * action so our current recovery efforts should also
1431 * address this command. Be aware that the error recovery
1432 * code assumes that only one recovery action is in progress
1433 * on a particular peripheral instance at any given time
1434 * (e.g. only one saved CCB for error recovery) so it is
1435  * imperative that we don't violate this assumption.
1438 *action &= ~SSQ_PRINT_SENSE;
1440 scsi_sense_action err_action;
1441 struct ccb_getdev cgd;
1444 * Grab the inquiry data for this device.
1446 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1447 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1448 xpt_action((union ccb *)&cgd);
1450 err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
1452 error = err_action & SS_ERRMASK;
1455 * Do not autostart sequential access devices
1456 * to avoid unexpected tape loading.
1458 if ((err_action & SS_MASK) == SS_START &&
1459 SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1460 *action_string = "Will not autostart a "
1461 "sequential access device";
1462 goto sense_error_done;
1466 * Avoid recovery recursion if recovery action is the same.
1468 if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
1469 if (((err_action & SS_MASK) == SS_START &&
1470 ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
1471 ((err_action & SS_MASK) == SS_TUR &&
1472 (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
1473 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1474 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1480 * If the recovery action will consume a retry,
1481 * make sure we actually have retries available.
1483 if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1484 if (ccb->ccb_h.retry_count > 0 &&
1485 (periph->flags & CAM_PERIPH_INVALID) == 0)
1486 ccb->ccb_h.retry_count--;
1488 *action_string = "Retries exhausted";
1489 goto sense_error_done;
1493 if ((err_action & SS_MASK) >= SS_START) {
1495 * Do common portions of commands that
1496 * use recovery CCBs.
1498 orig_ccb = xpt_alloc_ccb_nowait();
1499 if (orig_ccb == NULL) {
1500 *action_string = "Can't allocate recovery CCB";
1501 goto sense_error_done;
1504 * Clear freeze flag for original request here, as
1505 * this freeze will be dropped as part of ERESTART.
1507 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1508 bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
1511 switch (err_action & SS_MASK) {
1513 *action_string = "No recovery action needed";
1517 *action_string = "Retrying command (per sense data)";
1521 *action_string = "Unretryable error";
1528 * Send a start unit command to the device, and
1529 * then retry the command.
1531 *action_string = "Attempting to start unit";
1532 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1535 * Check for removable media and set
1536 * load/eject flag appropriately.
1538 if (SID_IS_REMOVABLE(&cgd.inq_data))
1543 scsi_start_stop(&ccb->csio,
1557 * Send a Test Unit Ready to the device.
1558 * If the 'many' flag is set, we send 120
1559 * test unit ready commands, one every half
1560 * second. Otherwise, we just send one TUR.
1561 * We only want to do this if the retry
1562 * count has not been exhausted.
1566 if ((err_action & SSQ_MANY) != 0) {
1567 *action_string = "Polling device for readiness";
1570 *action_string = "Testing device for readiness";
1573 periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1574 scsi_test_unit_ready(&ccb->csio,
1582 * Accomplish our 500ms delay by deferring
1583 * the release of our device queue appropriately.
1585 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1590 panic("Unhandled error action %x", err_action);
1593 if ((err_action & SS_MASK) >= SS_START) {
1595 * Drop the priority, so that the recovery
1596 * CCB is the first to execute. Freeze the queue
1597 * after this command is sent so that we can
1598 * restore the old csio and have it queued in
1599 * the proper order before we release normal
1600 * transactions to the device.
1602 ccb->ccb_h.pinfo.priority--;
1603 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1604 ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1610 *action = err_action;
1616 * Generic error handler. Peripheral drivers usually filter
1617 * out the errors that they handle in a unique manner, then
1618 * call this function.
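/*
 * A typical call from a peripheral driver's completion path (a sketch;
 * flags vary per driver, and NULL is passed for save_ccb as in
 * camperiphdone() above):
 *
 *	error = cam_periph_error(done_ccb, 0, SF_RETRY_UA | SF_NO_PRINT,
 *	    NULL);
 *	if (error == ERESTART)
 *		return;
 *
 * where ERESTART means the request has been requeued by error recovery.
 */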
1621 cam_periph_error(union ccb *ccb, cam_flags camflags,
1622 u_int32_t sense_flags, union ccb *save_ccb)
1624 struct cam_path *newpath;
1625 union ccb *orig_ccb, *scan_ccb;
1626 struct cam_periph *periph;
1627 const char *action_string;
1629 int frozen, error, openings, devctl_err;
1630 u_int32_t action, relsim_flags, timeout;
1632 action = SSQ_PRINT_SENSE;
1633 periph = xpt_path_periph(ccb->ccb_h.path);
1634 action_string = NULL;
1635 status = ccb->ccb_h.status;
1636 frozen = (status & CAM_DEV_QFRZN) != 0;
1637 status &= CAM_STATUS_MASK;
1638 devctl_err = openings = relsim_flags = timeout = 0;
1641 /* Filter the errors that should be reported via devctl */
1642 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
1643 case CAM_CMD_TIMEOUT:
1644 case CAM_REQ_ABORTED:
1645 case CAM_REQ_CMP_ERR:
1646 case CAM_REQ_TERMIO:
1647 case CAM_UNREC_HBA_ERROR:
1648 case CAM_DATA_RUN_ERR:
1649 case CAM_SCSI_STATUS_ERROR:
1650 case CAM_ATA_STATUS_ERROR:
1651 case CAM_SMP_STATUS_ERROR:
1661 action &= ~SSQ_PRINT_SENSE;
1663 case CAM_SCSI_STATUS_ERROR:
1664 error = camperiphscsistatuserror(ccb, &orig_ccb,
1665 camflags, sense_flags, &openings, &relsim_flags,
1666 &timeout, &action, &action_string);
1668 case CAM_AUTOSENSE_FAIL:
1669 error = EIO; /* we have to kill the command */
1673 case CAM_MSG_REJECT_REC:
1674 /* XXX Don't know that these are correct */
1677 case CAM_SEL_TIMEOUT:
1678 if ((camflags & CAM_RETRY_SELTO) != 0) {
1679 if (ccb->ccb_h.retry_count > 0 &&
1680 (periph->flags & CAM_PERIPH_INVALID) == 0) {
1681 ccb->ccb_h.retry_count--;
1685 * Wait a bit to give the device
1686 * time to recover before we try again.
1688 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1689 timeout = periph_selto_delay;
1692 action_string = "Retries exhausted";
1695 case CAM_DEV_NOT_THERE:
1699 case CAM_REQ_INVALID:
1700 case CAM_PATH_INVALID:
1702 case CAM_PROVIDE_FAIL:
1703 case CAM_REQ_TOO_BIG:
1704 case CAM_LUN_INVALID:
1705 case CAM_TID_INVALID:
1706 case CAM_FUNC_NOTAVAIL:
1709 case CAM_SCSI_BUS_RESET:
1712  * Commands that repeatedly time out and cause these
1713  * kinds of error recovery actions should return
1714 * CAM_CMD_TIMEOUT, which allows us to safely assume
1715 * that this command was an innocent bystander to
1716  * these events and should be unconditionally retried.
1719 case CAM_REQUEUE_REQ:
1720 /* Unconditional requeue if device is still there */
1721 if (periph->flags & CAM_PERIPH_INVALID) {
1722 action_string = "Periph was invalidated";
1724 } else if (sense_flags & SF_NO_RETRY) {
1726 action_string = "Retry was blocked";
1729 action &= ~SSQ_PRINT_SENSE;
1732 case CAM_RESRC_UNAVAIL:
1733 /* Wait a bit for the resource shortage to abate. */
1734 timeout = periph_noresrc_delay;
1738 /* Wait a bit for the busy condition to abate. */
1739 timeout = periph_busy_delay;
1741 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1743 case CAM_ATA_STATUS_ERROR:
1744 case CAM_REQ_CMP_ERR:
1745 case CAM_CMD_TIMEOUT:
1746 case CAM_UNEXP_BUSFREE:
1747 case CAM_UNCOR_PARITY:
1748 case CAM_DATA_RUN_ERR:
1750 if (periph->flags & CAM_PERIPH_INVALID) {
1752 action_string = "Periph was invalidated";
1753 } else if (ccb->ccb_h.retry_count == 0) {
1755 action_string = "Retries exhausted";
1756 } else if (sense_flags & SF_NO_RETRY) {
1758 action_string = "Retry was blocked";
1760 ccb->ccb_h.retry_count--;
1766 if ((sense_flags & SF_PRINT_ALWAYS) ||
1767 CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
1768 action |= SSQ_PRINT_SENSE;
1769 else if (sense_flags & SF_NO_PRINT)
1770 action &= ~SSQ_PRINT_SENSE;
1771 if ((action & SSQ_PRINT_SENSE) != 0)
1772 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1773 if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
1774 if (error != ERESTART) {
1775 if (action_string == NULL)
1776 action_string = "Unretryable error";
1777 xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
1778 error, action_string);
1779 } else if (action_string != NULL)
1780 xpt_print(ccb->ccb_h.path, "%s\n", action_string);
1782 xpt_print(ccb->ccb_h.path, "Retrying command\n");
1785 if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
1786 cam_periph_devctl_notify(orig_ccb);
1788 if ((action & SSQ_LOST) != 0) {
1792 * For a selection timeout, we consider all of the LUNs on
1793 * the target to be gone. If the status is CAM_DEV_NOT_THERE,
1794 * then we only get rid of the device(s) specified by the
1795 * path in the original CCB.
1797 if (status == CAM_SEL_TIMEOUT)
1798 lun_id = CAM_LUN_WILDCARD;
1800 lun_id = xpt_path_lun_id(ccb->ccb_h.path);
1802 /* Should we do more if we can't create the path?? */
1803 if (xpt_create_path(&newpath, periph,
1804 xpt_path_path_id(ccb->ccb_h.path),
1805 xpt_path_target_id(ccb->ccb_h.path),
1806 lun_id) == CAM_REQ_CMP) {
1809 * Let peripheral drivers know that this
1810 * device has gone away.
1812 xpt_async(AC_LOST_DEVICE, newpath, NULL);
1813 xpt_free_path(newpath);
1817 /* Broadcast UNIT ATTENTIONs to all periphs. */
1818 if ((action & SSQ_UA) != 0)
1819 xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
1821 /* Rescan target on "Reported LUNs data has changed" */
1822 if ((action & SSQ_RESCAN) != 0) {
1823 if (xpt_create_path(&newpath, NULL,
1824 xpt_path_path_id(ccb->ccb_h.path),
1825 xpt_path_target_id(ccb->ccb_h.path),
1826 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
1828 scan_ccb = xpt_alloc_ccb_nowait();
1829 if (scan_ccb != NULL) {
1830 scan_ccb->ccb_h.path = newpath;
1831 scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
1832 scan_ccb->crcn.flags = 0;
1833 xpt_rescan(scan_ccb);
1836 "Can't allocate CCB to rescan target\n");
1837 xpt_free_path(newpath);
1842 /* Attempt a retry */
1843 if (error == ERESTART || error == 0) {
1845 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1846 if (error == ERESTART)
1849 cam_release_devq(ccb->ccb_h.path,
1853 /*getcount_only*/0);
1859 #define CAM_PERIPH_DEVD_MSG_SIZE 256
1862 cam_periph_devctl_notify(union ccb *ccb)
1864 struct cam_periph *periph;
1865 struct ccb_getdev *cgd;
1867 int serr, sk, asc, ascq;
1870 sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
1874 sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
1876 periph = xpt_path_periph(ccb->ccb_h.path);
1877 sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
1878 periph->unit_number);
1880 sbuf_printf(&sb, "serial=\"");
1881 if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
1882 xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
1883 CAM_PRIORITY_NORMAL);
1884 cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1885 xpt_action((union ccb *)cgd);
1887 if (cgd->ccb_h.status == CAM_REQ_CMP)
1888 sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
1889 xpt_free_ccb((union ccb *)cgd);
1891 sbuf_printf(&sb, "\" ");
1892 sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
1894 switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
1895 case CAM_CMD_TIMEOUT:
1896 sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
1899 case CAM_SCSI_STATUS_ERROR:
1900 sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
1901 if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
1902 sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
1903 serr, sk, asc, ascq);
1906 case CAM_ATA_STATUS_ERROR:
1908 char res_str[(11 * 3) + 1];
1910 sbuf_printf(&sb, "RES=\"%s\" ", ata_res_string(&ccb->ataio.res,
1911 res_str, sizeof(res_str)));
1920 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1921 sbuf_printf(&sb, "CDB=\"");
1922 scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
1923 sbuf_printf(&sb, "\" ");
1924 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1925 sbuf_printf(&sb, "ACB=\"");
1926 ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
1927 sbuf_printf(&sb, "\" ");
1930 if (sbuf_finish(&sb) == 0)
1931 devctl_notify("CAM", "periph", type, sbuf_data(&sb));
1933 free(sbmsg, M_CAMPERIPH);
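/*
 * The notification above is delivered through devctl and can be matched
 * from devd(8); a hypothetical /etc/devd.conf entry:
 *
 *	notify 10 {
 *		match "system"		"CAM";
 *		match "subsystem"	"periph";
 *		action "logger CAM periph event: $device";
 *	};
 */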