/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <sys/sbuf.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun);
static	void		camperiphdone(struct cam_periph *periph,
				      union ccb *done_ccb);
static	void		camperiphfree(struct cam_periph *periph);
static	int		camperiphscsistatuserror(union ccb *ccb,
						 union ccb **orig_ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout,
						 u_int32_t *action,
						 const char **action_string);
static	int		camperiphscsisenseerror(union ccb *ccb,
						union ccb **orig_ccb,
						cam_flags camflags,
						u_int32_t sense_flags,
						int *openings,
						u_int32_t *relsim_flags,
						u_int32_t *timeout,
						u_int32_t *action,
						const char **action_string);
static void		cam_periph_devctl_notify(union ccb *ccb);
static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

again:
	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			    M_WAITOK);
	xpt_lock_buses();
	if (ndrivers != nperiph_drivers + 2) {
		/*
		 * Lost race against itself; go around.
		 */
		xpt_unlock_buses();
		free(newdrivers, M_CAMPERIPH);
		goto again;
	}
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	nperiph_drivers++;
	xpt_unlock_buses();
	if (old)
		free(old, M_CAMPERIPH);
	/* If driver marked as early or it is late now, initialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		drv->init();
}
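
/*
 * Illustrative sketch (not part of this file): peripheral drivers do not
 * normally call periphdriver_register() directly.  They declare a
 * struct periph_driver and let the PERIPHDRIVER_DECLARE() macro from
 * cam_periph.h hook it into this registration path at boot or module
 * load time, e.g. for a hypothetical "xx" driver:
 *
 *	static struct periph_driver xxdriver = {
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(xx, xxdriver);
 */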
void
periphdriver_unregister(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	int error, n;

	/* If driver marked as early or it is late now, deinitialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1) {
		if (drv->deinit == NULL) {
			printf("CAM periph driver '%s' doesn't have deinit.\n",
			    drv->driver_name);
			return;
		}
		error = drv->deinit();
		if (error != 0)
			return;
	}

	xpt_lock_buses();
	for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
		;
	KASSERT(n < nperiph_drivers,
	    ("Periph driver '%s' was not registered", drv->driver_name));
	for (; n + 1 < nperiph_drivers; n++)
		periph_drivers[n] = periph_drivers[n + 1];
	periph_drivers[n] = NULL;
	nperiph_drivers--;
	xpt_unlock_buses();
}
void
periphdriver_init(int level)
{
	int	i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}
cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t	path_id;
	target_id_t	target_id;
	lun_id_t	lun_id;
	cam_status	status;
	u_int		init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected flags %#x "
			       "refcount %d\n", periph->periph_name,
			       periph->unit_number, periph->flags,
			       periph->refcount);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT|M_ZERO);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->scheduled_priority = CAM_PRIORITY_NONE;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 1;		/* Dropped by invalidation. */
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("%s: Unknown init level", __func__);
	}
	return (status);
}
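
/*
 * Illustrative sketch (hypothetical "xx" driver, modeled on the existing
 * periph drivers): cam_periph_alloc() is typically called from a driver's
 * AC_FOUND_DEVICE async callback, which also serves as the deferred
 * callback registered above:
 *
 *	static void
 *	xxasync(void *callback_arg, u_int32_t code,
 *	    struct cam_path *path, void *arg)
 *	{
 *		struct cam_periph *periph = callback_arg;
 *		cam_status status;
 *
 *		switch (code) {
 *		case AC_FOUND_DEVICE:
 *			status = cam_periph_alloc(xxregister, xxoninvalidate,
 *			    xxcleanup, xxstart, "xx", CAM_PERIPH_BIO, path,
 *			    xxasync, AC_FOUND_DEVICE, arg);
 *			if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *				printf("xxasync: unable to attach to new "
 *				    "device due to status %#x\n", status);
 *			break;
 *		default:
 *			cam_periph_async(periph, code, path, arg);
 *			break;
 *		}
 *	}
 */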
/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				cam_periph_assert(periph, MA_OWNED);
				return (periph);
			}
		}
		if (name != NULL)
			return (NULL);
	}
	return (NULL);
}
/*
 * Find peripheral driver instances attached to the specified path.
 */
int
cam_periph_list(struct cam_path *path, struct sbuf *sb)
{
	struct sbuf local_sb;
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	int count;
	int sbuf_alloc_len;

	sbuf_alloc_len = 16;
retry:
	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
	count = 0;
	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) != 0)
				continue;

			if (sbuf_len(&local_sb) != 0)
				sbuf_cat(&local_sb, ",");

			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
				    periph->unit_number);

			if (sbuf_error(&local_sb) == ENOMEM) {
				sbuf_alloc_len *= 2;
				xpt_unlock_buses();
				sbuf_delete(&local_sb);
				goto retry;
			}
			count++;
		}
	}
	xpt_unlock_buses();
	sbuf_finish(&local_sb);
	sbuf_cpy(sb, sbuf_data(&local_sb));
	sbuf_delete(&local_sb);
	return (count);
}
cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	cam_status status;

	status = CAM_REQ_CMP_ERR;
	if (periph == NULL)
		return (status);

	xpt_lock_buses();
	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
		periph->refcount++;
		status = CAM_REQ_CMP;
	}
	xpt_unlock_buses();

	return (status);
}

void
cam_periph_doacquire(struct cam_periph *periph)
{

	xpt_lock_buses();
	KASSERT(periph->refcount >= 1,
	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
	periph->refcount++;
	xpt_unlock_buses();
}

void
cam_periph_release_locked_buses(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
	if (--periph->refcount == 0)
		camperiphfree(periph);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	cam_periph_release_locked_buses(periph);
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct mtx *mtx;

	if (periph == NULL)
		return;

	cam_periph_assert(periph, MA_NOTOWNED);
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(mtx);
}
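
/*
 * Illustrative sketch (assumed context): the acquire/release pair brackets
 * any use of a periph pointer that may outlive the caller's current lock
 * scope, for example when handing the periph to deferred work:
 *
 *	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *		return (ENXIO);	// already invalidated, do not use it
 *	... queue work that dereferences periph ...
 *	// later, when the deferred consumer is done with the pointer:
 *	cam_periph_release(periph);
 */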
int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	cam_periph_assert(periph, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = cam_periph_sleep(periph, periph, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			cam_periph_release_locked(periph);
			return (ENXIO);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}
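
/*
 * Illustrative sketch (hypothetical "xx" driver): following a successful
 * cam_periph_acquire(), cam_periph_hold() and cam_periph_unhold()
 * serialize open/close style operations against each other and against
 * invalidation, so a typical d_open routine does:
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		cam_periph_release(periph);
 *		return (error);
 *	}
 *	... issue open-time commands ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */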
/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name;
	int	i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}
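
/*
 * For example (illustrative device.hints(5) entries): the following wires
 * unit da4 to the device at scbus1 target 5 lun 0, and camperiphnextunit()
 * will then skip unit 4 when naming non-matching devices:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */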
static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int	unit;
	int	wired, i, val, dunit;
	const char *dname, *strval;
	char	pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
void
cam_periph_invalidate(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
		return;

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
		struct sbuf sb;
		char buffer[160];

		sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
		xpt_denounce_periph_sbuf(periph, &sb);
		sbuf_finish(&sb);
		sbuf_putbuf(&sb);
	}
	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
	if (periph->periph_oninval != NULL)
		periph->periph_oninval(periph);
	cam_periph_release_locked(periph);
}
static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;
	struct periph_driver *drv;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
	    periph->periph_name, periph->unit_number));
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}
	/*
	 * Cache a pointer to the periph_driver structure.  If a
	 * periph_driver is added or removed from the array (see
	 * periphdriver_register()) while we drop the topology lock
	 * below, p_drv may change.  This doesn't protect against this
	 * particular periph_driver going away.  That will require full
	 * reference counting in the periph_driver infrastructure.
	 */
	drv = *p_drv;

	/*
	 * We need to set this flag before dropping the topology lock, to
	 * let anyone who is traversing the list know that this peripheral
	 * is about to be freed, and that there will be no more reference
	 * count checks.
	 */
	periph->flags |= CAM_PERIPH_FREE;

	/*
	 * The peripheral destructor semantics dictate calling with only the
	 * SIM mutex held.  Since it might sleep, it should not be called
	 * with the topology lock held.
	 */
	xpt_unlock_buses();

	/*
	 * We need to call the peripheral destructor prior to removing the
	 * peripheral from the list.  Otherwise, we risk running into a
	 * scenario where the peripheral unit number may get reused
	 * (because it has been removed from the list), but some resources
	 * used by the peripheral are still hanging around.  In particular,
	 * the devfs nodes used by some peripherals like the pass(4) driver
	 * aren't fully cleaned up until the destructor is run.  If the
	 * unit number is reused before the devfs instance is fully gone,
	 * devfs will panic.
	 */
	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	/*
	 * The peripheral list is protected by the topology lock.
	 */
	xpt_lock_buses();

	TAILQ_REMOVE(&drv->units, periph, unit_links);
	drv->generation++;

	xpt_remove_periph(periph);

	xpt_unlock_buses();
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
		xpt_print(periph->path, "Periph destroyed\n");
	else
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}
/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This is now a generic function that centralizes most
 * of the sanity checks on the data flags, if any.
 * This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
    u_int maxmap)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	if (maxmap == 0)
		maxmap = DFLTPHYS;	/* traditional default */
	else if (maxmap > MAXPHYS)
		maxmap = MAXPHYS;	/* for safety */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return (EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_MMC_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		/* Two mappings: one for cmd->data and one for cmd->data->data */
		data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
		lengths[0] = sizeof(struct mmc_data *);
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
		lengths[1] = ccb->mmcio.cmd.data->len;
		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 2;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		if (ccb->cdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	default:
		return (EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       (u_long)maxmap);
			return (E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}
	}

	/*
	 * This keeps the kernel stack of current thread from getting
	 * swapped.  In low-memory situations where the kernel stack might
	 * otherwise get swapped out, this holds it and allows the thread
	 * to make progress and release the kernel mapped pages sooner.
	 *
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* save the user's data address */
		mapinfo->bp[i]->b_caller1 = *data_ptrs[i];

		/* set the transfer length, we know it's < MAXPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i], 1) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_caller1;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return (EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return (0);
}
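
/*
 * Illustrative sketch (assumed context, modeled on pass(4)-style ioctl
 * handling; "xxerror", "softc" and "maxio" are hypothetical): the
 * map/unmap pair brackets submission of a user-supplied CCB, and mapinfo
 * must be zeroed so that an early error return unwinds cleanly:
 *
 *	struct cam_periph_map_info mapinfo;
 *	int error;
 *
 *	bzero(&mapinfo, sizeof(mapinfo));
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxio);
 *	if (error != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 *	return (error);
 */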
/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* nothing to free and the process wasn't held. */
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_SMP_IO:
		numbufs = min(mapinfo->num_bufs_used, 2);
		data_ptrs[0] = &ccb->smpio.smp_request;
		data_ptrs[1] = &ccb->smpio.smp_response;
		break;
	case XPT_DEV_ADVINFO:
		numbufs = min(mapinfo->num_bufs_used, 1);
		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_caller1;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}
int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
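
/*
 * Illustrative sketch (userland, assumed context): CAMGETPASSTHRU is how
 * libcam locates the pass(4) instance that shares a peripheral's path; a
 * raw consumer of the ioctl would do:
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == -1)
 *		err(1, "CAMGETPASSTHRU");
 *	printf("%s%u\n", ccb.cgdl.periph_name, ccb.cgdl.unit_number);
 */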
static void
cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
{

	panic("%s: already done with ccb %p", __func__, done_ccb);
}

static void
cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
{

	/* Caller will release the CCB */
	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static void
cam_periph_ccbwait(union ccb *ccb)
{

	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
			    PRIBIO, "cbwait", 0);
	}
	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
	     "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
	     ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
}
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct bintime *starttime;
	struct bintime ltime;
	int error;

	starttime = NULL;
	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
	     ccb->ccb_h.func_code, ccb->ccb_h.flags));

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if (ds != NULL &&
	    (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO ||
	    ccb->ccb_h.func_code == XPT_NVME_IO)) {
		starttime = &ltime;
		binuptime(starttime);
		devstat_start_transaction(ds, starttime);
	}

	ccb->ccb_h.cbfcnp = cam_periph_done;
	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL) {
			ccb->ccb_h.cbfcnp = cam_periph_done;
			error = (*error_routine)(ccb, camflags, sense_flags);
		} else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		uint32_t bytes;
		devstat_tag_type tag;
		bool valid = true;

		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			bytes = ccb->csio.dxfer_len - ccb->csio.resid;
			tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
			tag = (devstat_tag_type)0;
		} else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
			bytes = ccb->nvmeio.dxfer_len; /* NB: no resid reported */
			tag = (devstat_tag_type)0;
		} else {
			valid = false;
		}
		if (valid)
			devstat_end_transaction(ds, bytes, tag,
			    ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
			    DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
			    DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
	}

	return (error);
}
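
/*
 * Illustrative sketch (hypothetical "xx" driver): issuing a synchronous
 * TEST UNIT READY with the periph lock held; the callback argument is
 * unused because cam_periph_runccb() installs cam_periph_done() itself:
 *
 *	union ccb *ccb;
 *	int error;
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 4, NULL, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, xxerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *	xpt_release_ccb(ccb);
 */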
void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
	    relsim_flags, openings, arg, getcount_only));
	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
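
/*
 * For example (taken from cam_periph_freeze_after_event() below), the two
 * calls pair up to hold a device queue for a settle period and release it
 * automatically once a timeout expires:
 *
 *	cam_freeze_devq(periph->path);
 *	cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT,
 *	    0, duration_ms, 0);
 */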
#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb;
	cam_status	status;
	struct scsi_start_stop_unit *scsi_cmd;
	int		error_code, sense_key, asc, ascq;

	scsi_cmd = (struct scsi_start_stop_unit *)
	    &done_ccb->csio.cdb_io.cdb_bytes;
	status = done_ccb->ccb_h.status;

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (scsi_extract_sense_ccb(done_ccb,
		    &error_code, &sense_key, &asc, &ascq)) {
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				if (status & CAM_DEV_QFRZN) {
					cam_release_devq(done_ccb->ccb_h.path,
					    0, 0, 0, 0);
					done_ccb->ccb_h.status &=
					    ~CAM_DEV_QFRZN;
				}
				xpt_action(done_ccb);
				goto out;
			}
		}
		if (cam_periph_error(done_ccb,
		    0, SF_RETRY_UA | SF_NO_PRINT) == ERESTART)
			goto out;
		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
	} else {
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
	}

	/*
	 * Perform the final retry with the original CCB so that final
	 * error processing is performed by the owner of the CCB.
	 */
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
	xpt_free_ccb(saved_ccb);
	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
	xpt_action(done_ccb);

out:
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
}
/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}
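
/*
 * Illustrative fragment (hypothetical "xx" driver): a driver's async
 * callback handles its own events and forwards the remainder here:
 *
 *	default:
 *		cam_periph_async(periph, code, path, arg);
 *		break;
 */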
void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	if (!timevalisset(event_time))
		return;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}
}
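
/*
 * For example (illustrative numbers): with a 1000ms settle duration and a
 * reset recorded 300ms ago, delta (300ms) is less than the duration, so
 * the devq is frozen and then released after the remaining 700ms.
 */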
static int
camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, u_int32_t *action, const char **action_string)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb, orig_ccb,
						camflags,
						sense_flags,
						openings,
						relsim_flags,
						timeout,
						action,
						action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			*action &= ~SSQ_PRINT_SENSE;
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if ((sense_flags & SF_RETRY_BUSY) != 0 ||
		    (ccb->ccb_h.retry_count--) > 0) {
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
	default:
		error = EIO;
		break;
	}
	return (error);
}
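
/*
 * For example (illustrative numbers): if QUEUE FULL arrives while
 * dev_active = 30 and dev_openings = 2, then total_openings = 32 and
 * *openings becomes 30 (assuming mintags <= 30), so the SIM queue is
 * released with RELSIM_ADJUST_OPENINGS to cap the device at 30 tags.
 */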
static int
camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, u_int32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error, recoveryccb;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	periph = xpt_path_periph(ccb->ccb_h.path);
	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
		*action &= ~SSQ_PRINT_SENSE;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
		    sense_flags);
		error = err_action & SS_ERRMASK;

		/*
		 * Do not autostart sequential access devices
		 * to avoid unexpected tape loading.
		 */
		if ((err_action & SS_MASK) == SS_START &&
		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
			*action_string = "Will not autostart a "
			    "sequential access device";
			goto sense_error_done;
		}

		/*
		 * Avoid recovery recursion if recovery action is the same.
		 */
		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
			if (((err_action & SS_MASK) == SS_START &&
			     ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
			    ((err_action & SS_MASK) == SS_TUR &&
			     (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				*timeout = 500;
			}
		}

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0)
				ccb->ccb_h.retry_count--;
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
			bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
				retries = 1;
			}
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority--;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			error = ERESTART;
			*orig = orig_ccb;
		}

sense_error_done:
		*action = err_action;
	}
	return (error);
}
/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags)
{
	struct cam_path *newpath;
	union ccb  *orig_ccb, *scan_ccb;
	struct cam_periph *periph;
	const char *action_string;
	cam_status  status;
	int	    frozen, error, openings, devctl_err;
	u_int32_t   action, relsim_flags, timeout;

	action = SSQ_PRINT_SENSE;
	periph = xpt_path_periph(ccb->ccb_h.path);
	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	devctl_err = openings = relsim_flags = timeout = 0;
	orig_ccb = ccb;

	/* Filter the errors that should be reported via devctl */
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
	case CAM_SCSI_STATUS_ERROR:
	case CAM_ATA_STATUS_ERROR:
	case CAM_SMP_STATUS_ERROR:
		devctl_err++;
		break;
	default:
		break;
	}

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		action &= ~SSQ_PRINT_SENSE;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb, &orig_ccb,
		    camflags, sense_flags, &openings, &relsim_flags,
		    &timeout, &action, &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
				ccb->ccb_h.retry_count--;
				error = ERESTART;

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
			action_string = "Retries exhausted";
		}
		/* FALLTHROUGH */
	case CAM_DEV_NOT_THERE:
		error = ENXIO;
		action = SSQ_LOST;
		break;
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
	case CAM_FUNC_NOTAVAIL:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue if device is still there */
		if (periph->flags & CAM_PERIPH_INVALID) {
			action_string = "Periph was invalidated";
			error = EIO;
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			error = ERESTART;
			action &= ~SSQ_PRINT_SENSE;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	case CAM_ATA_STATUS_ERROR:
	case CAM_REQ_CMP_ERR:
	case CAM_CMD_TIMEOUT:
	case CAM_UNEXP_BUSFREE:
	case CAM_UNCOR_PARITY:
	case CAM_DATA_RUN_ERR:
	default:
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = EIO;
			action_string = "Periph was invalidated";
		} else if (ccb->ccb_h.retry_count == 0) {
			error = EIO;
			action_string = "Retries exhausted";
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		}
		break;
	}

	if ((sense_flags & SF_PRINT_ALWAYS) ||
	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
		action |= SSQ_PRINT_SENSE;
	else if (sense_flags & SF_NO_PRINT)
		action &= ~SSQ_PRINT_SENSE;
	if ((action & SSQ_PRINT_SENSE) != 0)
		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else
			xpt_print(ccb->ccb_h.path, "Retrying command\n");
	}

	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
		cam_periph_devctl_notify(orig_ccb);

	if ((action & SSQ_LOST) != 0) {
		lun_id_t lun_id;

		/*
		 * For a selection timeout, we consider all of the LUNs on
		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
		 * then we only get rid of the device(s) specified by the
		 * path in the original CCB.
		 */
		if (status == CAM_SEL_TIMEOUT)
			lun_id = CAM_LUN_WILDCARD;
		else
			lun_id = xpt_path_lun_id(ccb->ccb_h.path);

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, periph,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    lun_id) == CAM_REQ_CMP) {

			/*
			 * Let peripheral drivers know that this
			 * device has gone away.
			 */
			xpt_async(AC_LOST_DEVICE, newpath, NULL);
			xpt_free_path(newpath);
		}
	}

	/* Broadcast UNIT ATTENTIONs to all periphs. */
	if ((action & SSQ_UA) != 0)
		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);

	/* Rescan target on "Reported LUNs data has changed" */
	if ((action & SSQ_RESCAN) != 0) {
		if (xpt_create_path(&newpath, NULL,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {

			scan_ccb = xpt_alloc_ccb_nowait();
			if (scan_ccb != NULL) {
				scan_ccb->ccb_h.path = newpath;
				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
				scan_ccb->crcn.flags = 0;
				xpt_rescan(scan_ccb);
			} else {
				xpt_print(newpath,
				    "Can't allocate CCB to rescan target\n");
				xpt_free_path(newpath);
			}
		}
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}
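
/*
 * Illustrative sketch (hypothetical "xx" driver): a periph driver's done
 * routine usually lets cam_periph_error() arbitrate between retrying and
 * failing a request:
 *
 *	error = cam_periph_error(done_ccb, 0, SF_RETRY_UA);
 *	if (error == ERESTART) {
 *		// Recovery is in progress; the CCB will be resubmitted
 *		// and will pass through the done routine again later.
 *		return;
 *	}
 *	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
 *		cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
 */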
#define CAM_PERIPH_DEVD_MSG_SIZE	256

static void
cam_periph_devctl_notify(union ccb *ccb)
{
	struct cam_periph *periph;
	struct ccb_getdev *cgd;
	struct sbuf sb;
	int serr, sk, asc, ascq;
	char *sbmsg, *type;

	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
	if (sbmsg == NULL)
		return;

	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);

	periph = xpt_path_periph(ccb->ccb_h.path);
	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
	    periph->unit_number);

	sbuf_printf(&sb, "serial=\"");
	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
		xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
		    CAM_PRIORITY_NORMAL);
		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)cgd);

		if (cgd->ccb_h.status == CAM_REQ_CMP)
			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
		xpt_free_ccb((union ccb *)cgd);
	}
	sbuf_printf(&sb, "\" ");
	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);

	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
		type = "timeout";
		break;
	case CAM_SCSI_STATUS_ERROR:
		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
			    serr, sk, asc, ascq);
		type = "error";
		break;
	case CAM_ATA_STATUS_ERROR:
		sbuf_printf(&sb, "RES=\"");
		ata_res_sbuf(&ccb->ataio.res, &sb);
		sbuf_printf(&sb, "\" ");
		type = "error";
		break;
	default:
		type = "error";
		break;
	}

	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
		sbuf_printf(&sb, "CDB=\"");
		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
		sbuf_printf(&sb, "\" ");
	} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
		sbuf_printf(&sb, "ACB=\"");
		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
		sbuf_printf(&sb, "\" ");
	}

	if (sbuf_finish(&sb) == 0)
		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
	sbuf_delete(&sb);
	free(sbmsg, M_CAMPERIPH);
}
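
/*
 * The resulting devd(8) event looks, for example (illustrative values),
 * like:
 *
 *	!system=CAM subsystem=periph type=timeout device=da0
 *	    serial="ABC123" cam_status="0x4a" timeout=30000
 *	    CDB="28 00 00 12 34 56 00 00 08 00"
 */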