/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_compat.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, bool wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun, const char *sn);
static	void		camperiphdone(struct cam_periph *periph,
					union ccb *done_ccb);
static	void		camperiphfree(struct cam_periph *periph);
static int		camperiphscsistatuserror(union ccb *ccb,
						 union ccb **orig_ccb,
						 cam_flags camflags,
						 uint32_t sense_flags,
						 int *openings,
						 uint32_t *relsim_flags,
						 uint32_t *timeout,
						 uint32_t *action,
						 const char **action_string);
static	int		camperiphscsisenseerror(union ccb *ccb,
						union ccb **orig_ccb,
						cam_flags camflags,
						uint32_t sense_flags,
						int *openings,
						uint32_t *relsim_flags,
						uint32_t *timeout,
						uint32_t *action,
						const char **action_string);
static void		cam_periph_devctl_notify(union ccb *ccb);
static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);

static u_int periph_mapmem_thresh = 65536;
SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
    &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");
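
/*
 * Example (sketch, not from this file): the delays above are in
 * milliseconds and the mapping threshold in bytes; all are tunable from
 * loader.conf(5), e.g.:
 *
 *	kern.cam.periph_selto_delay="2000"
 *	kern.cam.mapmem_thresh="131072"
 */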
static void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

again:
	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			    M_WAITOK);
	xpt_lock_buses();
	if (ndrivers != nperiph_drivers + 2) {
		/*
		 * Lost race against itself; go around.
		 */
		xpt_unlock_buses();
		free(newdrivers, M_CAMPERIPH);
		goto again;
	}
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	nperiph_drivers++;
	xpt_unlock_buses();
	if (old)
		free(old, M_CAMPERIPH);
	/* If driver marked as early or it is late now, initialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		drv->init();
}

static void
periphdriver_unregister(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	int error, n;

	/* If driver marked as early or it is late now, deinitialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1) {
		if (drv->deinit == NULL) {
			printf("CAM periph driver '%s' doesn't have deinit.\n",
			    drv->driver_name);
			return;
		}
		error = drv->deinit();
		if (error != 0)
			return;
	}

	xpt_lock_buses();
	for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
		;
	KASSERT(n < nperiph_drivers,
	    ("Periph driver '%s' was not registered", drv->driver_name));
	for (; n + 1 < nperiph_drivers; n++)
		periph_drivers[n] = periph_drivers[n + 1];
	periph_drivers[n + 1] = NULL;
	nperiph_drivers--;
	xpt_unlock_buses();
}

void
periphdriver_init(int level)
{
	int	i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}
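
/*
 * Example (sketch): a peripheral driver normally reaches
 * periphdriver_register()/periphdriver_unregister() through the
 * PERIPHDRIVER_DECLARE() macro from cam_periph.h rather than calling
 * them directly.  The "xx" driver and its init routine below are
 * hypothetical.
 */
#ifdef CAM_PERIPH_EXAMPLE
static periph_init_t xxinit;

static struct periph_driver xxdriver =
{
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif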
cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected flags %#x "
			       "refcount %d\n", periph->periph_name,
			       periph->unit_number, periph->flags,
			       periph->refcount);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT|M_ZERO);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->scheduled_priority = CAM_PRIORITY_NONE;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 1;		/* Dropped by invalidation. */
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id,
	    path->device->serial_num);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("%s: Unknown init level", __func__);
	}
	return (status);
}
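
/*
 * Example (sketch): cam_periph_alloc() is normally called from a
 * driver's async callback when a new device is found; compare the
 * attach paths of da(4) or cd(4).  All "xx" handlers below are
 * hypothetical and only forward-declared here.
 */
#ifdef CAM_PERIPH_EXAMPLE
static periph_ctor_t xxregister;
static periph_oninv_t xxoninvalidate;
static periph_dtor_t xxcleanup;
static periph_start_t xxstart;

static void
xxasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	cam_status status;

	if (code != AC_FOUND_DEVICE)
		return;

	status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
	    xxstart, "xx", CAM_PERIPH_BIO, path, xxasync, AC_FOUND_DEVICE,
	    arg);
	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
		printf("xxasync: unable to attach, status %#x\n", status);
}
#endif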
/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				cam_periph_assert(periph, MA_OWNED);
				return (periph);
			}
		}
		if (name != NULL)
			return (NULL);
	}
	return (NULL);
}
/*
 * Find peripheral driver instances attached to the specified path.
 */
int
cam_periph_list(struct cam_path *path, struct sbuf *sb)
{
	struct sbuf local_sb;
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	int count;
	int sbuf_alloc_len;

	sbuf_alloc_len = 16;
retry:
	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
	count = 0;
	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) != 0)
				continue;

			if (sbuf_len(&local_sb) != 0)
				sbuf_cat(&local_sb, ",");

			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
				    periph->unit_number);

			if (sbuf_error(&local_sb) == ENOMEM) {
				sbuf_alloc_len *= 2;
				xpt_unlock_buses();
				sbuf_delete(&local_sb);
				goto retry;
			}
			count++;
		}
	}
	xpt_unlock_buses();
	sbuf_finish(&local_sb);
	if (sbuf_len(sb) != 0)
		sbuf_cat(sb, ",");
	sbuf_cat(sb, sbuf_data(&local_sb));
	sbuf_delete(&local_sb);
	return (count);
}
int
cam_periph_acquire(struct cam_periph *periph)
{
	int status;

	if (periph == NULL)
		return (EINVAL);

	status = ENOENT;
	xpt_lock_buses();
	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
		periph->refcount++;
		status = 0;
	}
	xpt_unlock_buses();

	return (status);
}

void
cam_periph_doacquire(struct cam_periph *periph)
{

	xpt_lock_buses();
	KASSERT(periph->refcount >= 1,
	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
	periph->refcount++;
	xpt_unlock_buses();
}

void
cam_periph_release_locked_buses(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
	if (--periph->refcount == 0)
		camperiphfree(periph);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	cam_periph_release_locked_buses(periph);
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct mtx *mtx;

	if (periph == NULL)
		return;

	cam_periph_assert(periph, MA_NOTOWNED);
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(mtx);
}
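
/*
 * Example (sketch): a typical reference-counting pattern around an
 * asynchronous operation.  The acquire fails once the periph has been
 * invalidated, so late work is never started on a dying device:
 *
 *	if (cam_periph_acquire(periph) != 0)
 *		return (ENXIO);
 *	... start work that completes later ...
 *	cam_periph_release(periph);	(from the completion path)
 */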
/*
 * hold/unhold act as mutual exclusion for sections of the code that
 * need to sleep and want to make sure that other sections that
 * will interfere are held off.  This only protects exclusive sections
 * from each other.
 */
int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != 0)
		return (ENXIO);

	cam_periph_assert(periph, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = cam_periph_sleep(periph, periph, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			cam_periph_release_locked(periph);
			return (ENXIO);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}

void
cam_periph_hold_boot(struct cam_periph *periph)
{

	root_mount_hold_token(periph->periph_name, &periph->periph_rootmount);
}

void
cam_periph_release_boot(struct cam_periph *periph)
{

	root_mount_rel(&periph->periph_rootmount);
}
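
/*
 * Example (sketch): bracketing a sleepable, exclusive section with
 * cam_periph_hold()/cam_periph_unhold() above, e.g. in a hypothetical
 * open routine; assumes the periph lock is held on entry, as
 * cam_periph_hold() requires.
 */
#ifdef CAM_PERIPH_EXAMPLE
static int
xxopen_exclusive(struct cam_periph *periph)
{
	int error;

	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0)
		return (error);	/* interrupted or periph invalidated */

	/* ... sleepable setup work, serialized against other holders ... */

	cam_periph_unhold(periph);
	return (0);
}
#endif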
/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, bool wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;; newunit++) {
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = false;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't let the mere presence of any attributes of a device
		 * mean that it is a wired-down entry.  Instead, insist that
		 * one of the matching criteria from camperiphunit be present
		 * for the device.
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;

			if (newunit != dunit)
				continue;
			if (resource_string_value(dname, dunit, "sn", &strval) == 0 ||
			    resource_int_value(dname, dunit, "lun", &val) == 0 ||
			    resource_int_value(dname, dunit, "target", &val) == 0 ||
			    resource_string_value(dname, dunit, "at", &strval) == 0)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}
static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
    target_id_t target, lun_id_t lun, const char *sn)
{
	bool wired = false;
	u_int unit;
	int i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;

	for (wired = false; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = false) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired = true;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired = true;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired = true;
		}
		if (resource_string_value(dname, dunit, "sn", &strval) == 0) {
			if (sn == NULL || strcmp(strval, sn) != 0)
				continue;
			wired = true;
		}
		if (wired) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
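
/*
 * Example (sketch): wiring entries that the two functions above match
 * against, e.g. in /boot/device.hints:
 *
 *	hint.da.4.at="scbus2"
 *	hint.da.4.target="1"
 *	hint.da.4.lun="0"
 *
 * A device probed at scbus2, target 1, lun 0 would then be wired to
 * unit da4, and camperiphnextunit() will skip unit 4 when numbering
 * devices that do not match the entry.
 */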
void
cam_periph_invalidate(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	/*
	 * We only tear down the device the first time a peripheral is
	 * invalidated.
	 */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
		return;

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
		struct sbuf sb;
		char buffer[160];

		sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
		xpt_denounce_periph_sbuf(periph, &sb);
		sbuf_finish(&sb);
		sbuf_putbuf(&sb);
	}
	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
	if (periph->periph_oninval != NULL)
		periph->periph_oninval(periph);
	cam_periph_release_locked(periph);
}
static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;
	struct periph_driver *drv;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
	    periph->periph_name, periph->unit_number));
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}
	/*
	 * Cache a pointer to the periph_driver structure.  If a
	 * periph_driver is added or removed from the array (see
	 * periphdriver_register()) while we drop the topology lock
	 * below, p_drv may change.  This doesn't protect against this
	 * particular periph_driver going away.  That will require full
	 * reference counting in the periph_driver infrastructure.
	 */
	drv = *p_drv;

	/*
	 * We need to set this flag before dropping the topology lock, to
	 * let anyone who is traversing the list know that this peripheral
	 * is about to be freed, and there will be no more reference count
	 * checks.
	 */
	periph->flags |= CAM_PERIPH_FREE;

	/*
	 * The peripheral destructor semantics dictate calling with only the
	 * SIM mutex held.  Since it might sleep, it should not be called
	 * with the topology lock held.
	 */
	xpt_unlock_buses();

	/*
	 * We need to call the peripheral destructor prior to removing the
	 * peripheral from the list.  Otherwise, we risk running into a
	 * scenario where the peripheral unit number may get reused
	 * (because it has been removed from the list), but some resources
	 * used by the peripheral are still hanging around.  In particular,
	 * the devfs nodes used by some peripherals like the pass(4) driver
	 * aren't fully cleaned up until the destructor is run.  If the
	 * unit number is reused before the devfs instance is fully gone,
	 * devfs will panic.
	 */
	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	/*
	 * The peripheral list is protected by the topology lock.  We have to
	 * remove the periph from the drv list before we call deferred_ac.  The
	 * AC_FOUND_DEVICE callback won't create a new periph if it's still there.
	 */
	xpt_lock_buses();

	TAILQ_REMOVE(&drv->units, periph, unit_links);
	drv->generation++;

	xpt_remove_periph(periph);

	xpt_unlock_buses();
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
		xpt_print(periph->path, "Periph destroyed\n");
	else
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		memset(&ccb, 0, sizeof(ccb));
		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			xpt_path_inq(&ccb.cpi, periph->path);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}
/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This is now a generic function that centralizes most
 * of the sanity checks on the data flags, if any.
 * This also only works for up to maxphys memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
    u_int maxmap)
{
	int numbufs, i;
	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	uint32_t lengths[CAM_PERIPH_MAXMAPS];
	uint32_t dirs[CAM_PERIPH_MAXMAPS];

	bzero(mapinfo, sizeof(*mapinfo));
	if (maxmap == 0)
		maxmap = DFLTPHYS;	/* traditional default */
	else if (maxmap > maxphys)
		maxmap = maxphys;	/* for safety */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return (EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict.  vmapbuf() is able to map up to maxphys.
		 */
		maxmap = maxphys;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_MMC_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		/* Two mappings: one for cmd->data and one for cmd->data->data */
		data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
		lengths[0] = sizeof(struct mmc_data *);
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
		lengths[1] = ccb->mmcio.cmd.data->len;
		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 2;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		if (ccb->cdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict.  vmapbuf() is able to map up to maxphys.
		 */
		maxmap = maxphys;
		break;
	default:
		return (EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {
		if (lengths[i] > maxmap) {
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (long)(lengths[i]), (u_long)maxmap);
			return (E2BIG);
		}
	}

	/*
	 * This keeps the kernel stack of current thread from getting
	 * swapped.  In low-memory situations where the kernel stack might
	 * otherwise get swapped out, this holds it and allows the thread
	 * to make progress and release the kernel mapped pages sooner.
	 *
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/* Save the user's data address. */
		mapinfo->orig[i] = *data_ptrs[i];

		/*
		 * For small buffers use malloc+copyin/copyout instead of
		 * mapping to KVA to avoid expensive TLB shootdowns.  For
		 * small allocations malloc is backed by UMA, and so much
		 * cheaper on SMP systems.
		 */
		if (lengths[i] <= periph_mapmem_thresh &&
		    ccb->ccb_h.func_code != XPT_MMC_IO) {
			*data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
			    M_WAITOK);
			if (dirs[i] != CAM_DIR_IN) {
				if (copyin(mapinfo->orig[i], *data_ptrs[i],
				    lengths[i]) != 0) {
					free(*data_ptrs[i], M_CAMPERIPH);
					*data_ptrs[i] = mapinfo->orig[i];
					goto fail;
				}
			} else
				bzero(*data_ptrs[i], lengths[i]);
			continue;
		}

		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK);

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
		    BIO_WRITE : BIO_READ;

		/* Map the buffer into kernel memory. */
		if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
			uma_zfree(pbuf_zone, mapinfo->bp[i]);
			goto fail;
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i])
			BUF_KERNPROC(mapinfo->bp[i]);
	}

	mapinfo->num_bufs_used = numbufs;
	return (0);

fail:
	for (i--; i >= 0; i--) {
		if (mapinfo->bp[i]) {
			vunmapbuf(mapinfo->bp[i]);
			uma_zfree(pbuf_zone, mapinfo->bp[i]);
		} else {
			free(*data_ptrs[i], M_CAMPERIPH);
			*data_ptrs[i] = mapinfo->orig[i];
		}
	}
	PRELE(curproc);
	return (EACCES);
}
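
/*
 * Example (sketch): the usual mapmem/unmapmem bracketing of a
 * user-issued CCB, similar in spirit to what pass(4) does.  The "xx"
 * helper is hypothetical; mapping is done before taking the periph
 * lock since it may sleep.
 */
#ifdef CAM_PERIPH_EXAMPLE
static int
xxsendccb(struct cam_periph *periph, union ccb *ccb)
{
	struct cam_periph_map_info mapinfo;
	int error;

	error = cam_periph_mapmem(ccb, &mapinfo, maxphys);
	if (error != 0)
		return (error);

	cam_periph_lock(periph);
	error = cam_periph_runccb(ccb, /*error_routine*/NULL,
	    CAM_RETRY_SELTO, SF_RETRY_UA, /*devstat*/NULL);
	cam_periph_unlock(periph);

	cam_periph_unmapmem(ccb, &mapinfo);
	return (error);
}
#endif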
/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	uint32_t lengths[CAM_PERIPH_MAXMAPS];
	uint32_t dirs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* nothing to free and the process wasn't held. */
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_MMC_IO:
		data_ptrs[0] = (uint8_t **)&ccb->mmcio.cmd.data;
		lengths[0] = sizeof(struct mmc_data *);
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		data_ptrs[1] = (uint8_t **)&ccb->mmcio.cmd.data->data;
		lengths[1] = ccb->mmcio.cmd.data->len;
		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 2;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i]) {
			/* unmap the buffer */
			vunmapbuf(mapinfo->bp[i]);

			/* release the buffer */
			uma_zfree(pbuf_zone, mapinfo->bp[i]);
		} else {
			if (dirs[i] != CAM_DIR_OUT) {
				copyout(*data_ptrs[i], mapinfo->orig[i],
				    lengths[i]);
			}
			free(*data_ptrs[i], M_CAMPERIPH);
		}

		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->orig[i];
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}
int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      uint32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU_0x19:
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
static void
cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
{

	panic("%s: already done with ccb %p", __func__, done_ccb);
}

static void
cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
{

	/* Caller will release the CCB */
	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static void
cam_periph_ccbwait(union ccb *ccb)
{

	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
			    PRIBIO, "cbwait", 0);
	}
	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
	     "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
	     ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
}
/*
 * Dispatch a CCB and wait for it to complete.  If the CCB has set a
 * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
 */
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       uint32_t sense_flags),
		  cam_flags camflags, uint32_t sense_flags,
		  struct devstat *ds)
{
	struct bintime *starttime;
	struct bintime ltime;
	int error;
	bool must_poll;
	uint32_t timeout = 1;

	starttime = NULL;
	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
	     ccb->ccb_h.func_code, ccb->ccb_h.flags));

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if (ds != NULL &&
	    (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO ||
	    ccb->ccb_h.func_code == XPT_NVME_IO)) {
		starttime = &ltime;
		binuptime(starttime);
		devstat_start_transaction(ds, starttime);
	}

	/*
	 * We must poll the I/O while we're dumping.  The scheduler is normally
	 * stopped for dumping, except when we call doadump from ddb.  While the
	 * scheduler is running in this case, we still need to poll the I/O to
	 * avoid sleeping waiting for the ccb to complete.
	 *
	 * A panic triggered dump stops the scheduler, any callback from the
	 * shutdown_post_sync event will run with the scheduler stopped, but
	 * before we're officially dumping.  To avoid hanging in adashutdown
	 * initiated commands (or other similar situations), we have to test for
	 * either dumping or SCHEDULER_STOPPED() here.
	 *
	 * To avoid locking problems, dumping/polling callers must call
	 * without a periph lock held.
	 */
	must_poll = dumping || SCHEDULER_STOPPED();
	ccb->ccb_h.cbfcnp = cam_periph_done;

	/*
	 * If we're polling, then we need to ensure that we have ample resources
	 * in the periph.  cam_periph_error can reschedule the ccb by calling
	 * xpt_action and returning ERESTART, so we have to effect the polling
	 * in the do loop below.
	 */
	if (must_poll) {
		if (cam_sim_pollable(ccb->ccb_h.path->bus->sim))
			timeout = xpt_poll_setup(ccb);
		else
			timeout = 0;
	}

	if (timeout == 0) {
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		error = EBUSY;
	} else {
		xpt_action(ccb);
		do {
			if (must_poll) {
				xpt_pollwait(ccb, timeout);
				timeout = ccb->ccb_h.timeout * 10;
			} else {
				cam_periph_ccbwait(ccb);
			}
			if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
				error = 0;
			else if (error_routine != NULL) {
				/*
				 * cbfcnp is modified by cam_periph_ccbwait so
				 * reset it before we call the error routine
				 * which may call xpt_done.
				 */
				ccb->ccb_h.cbfcnp = cam_periph_done;
				error = (*error_routine)(ccb, camflags, sense_flags);
			} else
				error = 0;
		} while (error == ERESTART);
	}

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		uint32_t bytes;
		devstat_tag_type tag;
		bool valid = true;

		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			bytes = ccb->csio.dxfer_len - ccb->csio.resid;
			tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
			tag = (devstat_tag_type)0;
		} else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
			bytes = ccb->nvmeio.dxfer_len; /* NB: resid not possible */
			tag = (devstat_tag_type)0;
		} else {
			valid = false;
		}
		if (valid)
			devstat_end_transaction(ds, bytes, tag,
			    ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
			    DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
			    DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
	}

	return (error);
}
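
/*
 * Example (sketch): the error_routine passed to cam_periph_runccb() is
 * usually a thin driver wrapper that filters driver-specific cases and
 * then defers to cam_periph_error(); "xx" is hypothetical.
 */
#ifdef CAM_PERIPH_EXAMPLE
static int
xxerror(union ccb *ccb, cam_flags camflags, uint32_t sense_flags)
{

	/* Driver-specific filtering would go here. */
	return (cam_periph_error(ccb, camflags, sense_flags));
}
#endif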
void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
	memset(&ccb_h, 0, sizeof(ccb_h));
	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

uint32_t
cam_release_devq(struct cam_path *path, uint32_t relsim_flags,
		 uint32_t openings, uint32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
	    relsim_flags, openings, arg, getcount_only));
	memset(&crs, 0, sizeof(crs));
	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
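
/*
 * Example (sketch): pausing a device queue for 100ms with the pair of
 * functions above:
 *
 *	cam_freeze_devq(path);
 *	cam_release_devq(path, RELSIM_RELEASE_AFTER_TIMEOUT,
 *	    0, 100, 0);		(openings 0, timeout 100ms, getcount_only 0)
 */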
#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	struct scsi_start_stop_unit *scsi_cmd;
	int error = 0, error_code, sense_key, asc, ascq;
	uint16_t done_flags;

	scsi_cmd = (struct scsi_start_stop_unit *)
	    &done_ccb->csio.cdb_io.cdb_bytes;
	status = done_ccb->ccb_h.status;

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (scsi_extract_sense_ccb(done_ccb,
		    &error_code, &sense_key, &asc, &ascq)) {
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				if (status & CAM_DEV_QFRZN) {
					cam_release_devq(done_ccb->ccb_h.path,
							 0, 0, 0, 0);
					done_ccb->ccb_h.status &=
					    ~CAM_DEV_QFRZN;
				}
				xpt_action(done_ccb);
				goto out;
			}
		}
		error = cam_periph_error(done_ccb, 0,
		    SF_RETRY_UA | SF_NO_PRINT);
		if (error == ERESTART)
			goto out;
		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
	} else {
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
	}

	/* If we tried long wait and still failed, remember that. */
	if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
	    (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
		periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
		if (error != 0 && done_ccb->ccb_h.retry_count == 0)
			periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED;
	}

	/*
	 * After recovery action(s) completed, return to the original CCB.
	 * If the recovery CCB has failed, considering its own possible
	 * retries and recovery, assume we are back in state where we have
	 * been originally, but without recovery hopes left.  In such case,
	 * after the final attempt below, we cancel any further retries,
	 * thereby also blocking any new recovery attempts for this CCB,
	 * and the result will be the final one returned to the CCB owner.
	 */
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	KASSERT(saved_ccb->ccb_h.func_code == XPT_SCSI_IO,
	    ("%s: saved_ccb func_code %#x != XPT_SCSI_IO",
	     __func__, saved_ccb->ccb_h.func_code));
	KASSERT(done_ccb->ccb_h.func_code == XPT_SCSI_IO,
	    ("%s: done_ccb func_code %#x != XPT_SCSI_IO",
	     __func__, done_ccb->ccb_h.func_code));
	saved_ccb->ccb_h.periph_links = done_ccb->ccb_h.periph_links;
	done_flags = done_ccb->ccb_h.alloc_flags;
	bcopy(saved_ccb, done_ccb, sizeof(struct ccb_scsiio));
	done_ccb->ccb_h.alloc_flags = done_flags;
	xpt_free_ccb(saved_ccb);
	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
	if (error != 0)
		done_ccb->ccb_h.retry_count = 0;
	xpt_action(done_ccb);

out:
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
}
/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, uint32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	memset(&cgds, 0, sizeof(cgds));
	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}
void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	if (!timevalisset(event_time))
		return;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}
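
/*
 * Example (sketch): if the last bus reset was recorded 300ms ago and
 * the settle time is 1000ms, the function above freezes the device
 * queue and schedules its release after the remaining 700ms.
 */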
static int
camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
    cam_flags camflags, uint32_t sense_flags,
    int *openings, uint32_t *relsim_flags,
    uint32_t *timeout, uint32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb, orig_ccb,
						camflags,
						sense_flags,
						openings,
						relsim_flags,
						timeout,
						action,
						action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		memset(&cgds, 0, sizeof(cgds));
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			*action &= ~SSQ_PRINT_SENSE;
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		periph = xpt_path_periph(ccb->ccb_h.path);
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = ENXIO;
			*action_string = "Periph was invalidated";
		} else if ((sense_flags & SF_RETRY_BUSY) != 0 ||
		    ccb->ccb_h.retry_count > 0) {
			if ((sense_flags & SF_RETRY_BUSY) == 0)
				ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
			*action_string = "Retries exhausted";
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
	default:
		error = EIO;
		break;
	}
	return (error);
}
static int
camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
    cam_flags camflags, uint32_t sense_flags,
    int *openings, uint32_t *relsim_flags,
    uint32_t *timeout, uint32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error, recoveryccb;
	uint16_t flags;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	periph = xpt_path_periph(ccb->ccb_h.path);
	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
		*action &= ~SSQ_PRINT_SENSE;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		memset(&cgd, 0, sizeof(cgd));
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
		    sense_flags);
		error = err_action & SS_ERRMASK;

		/*
		 * Do not autostart sequential access devices
		 * to avoid unexpected tape loading.
		 */
		if ((err_action & SS_MASK) == SS_START &&
		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
			*action_string = "Will not autostart a "
			    "sequential access device";
			goto sense_error_done;
		}

		/*
		 * Avoid recovery recursion if recovery action is the same.
		 */
		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
			if (((err_action & SS_MASK) == SS_START &&
			     ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
			    ((err_action & SS_MASK) == SS_TUR &&
			     (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				*timeout = 500;
			}
		}

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0)
				ccb->ccb_h.retry_count--;
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

			KASSERT(ccb->ccb_h.func_code == XPT_SCSI_IO,
			    ("%s: ccb func_code %#x != XPT_SCSI_IO",
			     __func__, ccb->ccb_h.func_code));
			flags = orig_ccb->ccb_h.alloc_flags;
			bcopy(ccb, orig_ccb, sizeof(struct ccb_scsiio));
			orig_ccb->ccb_h.alloc_flags = flags;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			int retries;

			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			retries = 1;

			if ((err_action & SSQ_MANY) != 0 && (periph->flags &
			     CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) {
				periph->flags |= CAM_PERIPH_RECOVERY_WAIT;
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
			}

			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority--;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			error = ERESTART;
			*orig = orig_ccb;
		}

sense_error_done:
		*action = err_action;
	}
	return (error);
}
/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 uint32_t sense_flags)
{
	struct cam_path *newpath;
	union ccb *orig_ccb, *scan_ccb;
	struct cam_periph *periph;
	const char *action_string;
	cam_status status;
	int frozen, error, openings, devctl_err;
	uint32_t action, relsim_flags, timeout;

	action = SSQ_PRINT_SENSE;
	periph = xpt_path_periph(ccb->ccb_h.path);
	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	devctl_err = openings = relsim_flags = timeout = 0;
	orig_ccb = ccb;

	/* Filter the errors that should be reported via devctl */
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
	case CAM_SCSI_STATUS_ERROR:
	case CAM_ATA_STATUS_ERROR:
	case CAM_SMP_STATUS_ERROR:
	case CAM_DEV_NOT_THERE:
	case CAM_NVME_STATUS_ERROR:
		devctl_err++;
		break;
	default:
		break;
	}

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		action &= ~SSQ_PRINT_SENSE;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb, &orig_ccb,
		    camflags, sense_flags, &openings, &relsim_flags,
		    &timeout, &action, &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
				ccb->ccb_h.retry_count--;
				error = ERESTART;

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
			action_string = "Retries exhausted";
		}
		/* FALLTHROUGH */
	case CAM_DEV_NOT_THERE:
		error = ENXIO;
		action = SSQ_LOST;
		break;
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
	case CAM_FUNC_NOTAVAIL:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue if device is still there */
		if (periph->flags & CAM_PERIPH_INVALID) {
			action_string = "Periph was invalidated";
			error = ENXIO;
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			error = ERESTART;
			action &= ~SSQ_PRINT_SENSE;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	case CAM_ATA_STATUS_ERROR:
	case CAM_NVME_STATUS_ERROR:
	case CAM_SMP_STATUS_ERROR:
	case CAM_REQ_CMP_ERR:
	case CAM_CMD_TIMEOUT:
	case CAM_UNEXP_BUSFREE:
	case CAM_UNCOR_PARITY:
	case CAM_DATA_RUN_ERR:
	default:
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = ENXIO;
			action_string = "Periph was invalidated";
		} else if (ccb->ccb_h.retry_count == 0) {
			error = EIO;
			action_string = "Retries exhausted";
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		}
		break;
	}

	if ((sense_flags & SF_PRINT_ALWAYS) ||
	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
		action |= SSQ_PRINT_SENSE;
	else if (sense_flags & SF_NO_PRINT)
		action &= ~SSQ_PRINT_SENSE;
	if ((action & SSQ_PRINT_SENSE) != 0)
		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else {
			xpt_print(ccb->ccb_h.path,
			    "Retrying command, %d more tries remain\n",
			    ccb->ccb_h.retry_count);
		}
	}

	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
		cam_periph_devctl_notify(orig_ccb);

	if ((action & SSQ_LOST) != 0) {
		lun_id_t lun_id;

		/*
		 * For a selection timeout, we consider all of the LUNs on
		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
		 * then we only get rid of the device(s) specified by the
		 * path in the original CCB.
		 */
		if (status == CAM_SEL_TIMEOUT)
			lun_id = CAM_LUN_WILDCARD;
		else
			lun_id = xpt_path_lun_id(ccb->ccb_h.path);

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, periph,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    lun_id) == CAM_REQ_CMP) {
			/*
			 * Let peripheral drivers know that this
			 * device has gone away.
			 */
			xpt_async(AC_LOST_DEVICE, newpath, NULL);
			xpt_free_path(newpath);
		}
	}

	/* Broadcast UNIT ATTENTIONs to all periphs. */
	if ((action & SSQ_UA) != 0)
		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);

	/* Rescan target on "Reported LUNs data has changed" */
	if ((action & SSQ_RESCAN) != 0) {
		if (xpt_create_path(&newpath, NULL,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			scan_ccb = xpt_alloc_ccb_nowait();
			if (scan_ccb != NULL) {
				scan_ccb->ccb_h.path = newpath;
				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
				scan_ccb->crcn.flags = 0;
				xpt_rescan(scan_ccb);
			} else {
				xpt_print(newpath,
				    "Can't allocate CCB to rescan target\n");
				xpt_free_path(newpath);
			}
		}
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}
#define CAM_PERIPH_DEVD_MSG_SIZE	256

static void
cam_periph_devctl_notify(union ccb *ccb)
{
	struct cam_periph *periph;
	struct ccb_getdev *cgd;
	struct sbuf sb;
	char *sbmsg, *type;
	int serr, sk, asc, ascq;

	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
	if (sbmsg == NULL)
		return;

	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);

	periph = xpt_path_periph(ccb->ccb_h.path);
	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
	    periph->unit_number);

	sbuf_printf(&sb, "serial=\"");
	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
		xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
		    CAM_PRIORITY_NORMAL);
		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)cgd);

		if (cgd->ccb_h.status == CAM_REQ_CMP)
			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
		xpt_free_ccb((union ccb *)cgd);
	}
	sbuf_printf(&sb, "\" ");
	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);

	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
		type = "timeout";
		break;
	case CAM_SCSI_STATUS_ERROR:
		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
			    serr, sk, asc, ascq);
		type = "error";
		break;
	case CAM_ATA_STATUS_ERROR:
		sbuf_printf(&sb, "RES=\"");
		ata_res_sbuf(&ccb->ataio.res, &sb);
		sbuf_printf(&sb, "\" ");
		type = "error";
		break;
	case CAM_NVME_STATUS_ERROR:
	{
		struct ccb_nvmeio *n = &ccb->nvmeio;

		sbuf_printf(&sb, "sc=\"%02x\" sct=\"%02x\" cdw0=\"%08x\" ",
		    NVME_STATUS_GET_SC(n->cpl.status),
		    NVME_STATUS_GET_SCT(n->cpl.status), n->cpl.cdw0);
		type = "error";
		break;
	}
	default:
		type = "error";
		break;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		sbuf_printf(&sb, "CDB=\"");
		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
		sbuf_printf(&sb, "\" ");
		break;
	case XPT_ATA_IO:
		sbuf_printf(&sb, "ACB=\"");
		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
		sbuf_printf(&sb, "\" ");
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
	{
		struct ccb_nvmeio *n = &ccb->nvmeio;
		struct nvme_command *cmd = &n->cmd;

		// XXX Likely should be nvme_cmd_sbuf
		sbuf_printf(&sb, "opc=\"%02x\" fuse=\"%02x\" cid=\"%04x\" "
		    "nsid=\"%08x\" cdw10=\"%08x\" cdw11=\"%08x\" cdw12=\"%08x\" "
		    "cdw13=\"%08x\" cdw14=\"%08x\" cdw15=\"%08x\" ",
		    cmd->opc, cmd->fuse, cmd->cid, cmd->nsid, cmd->cdw10,
		    cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14, cmd->cdw15);
		break;
	}
	default:
		break;
	}

	if (sbuf_finish(&sb) == 0)
		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
	sbuf_delete(&sb);
	free(sbmsg, M_CAMPERIPH);
}
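
/*
 * Example (sketch): the resulting devd(8)-visible notification might
 * look like (field values are illustrative only):
 *
 *   !system=CAM subsystem=periph type=error device=da0 serial="XYZ123"
 *	cam_status="0x4cc" scsi_status=2 scsi_sense="70 03 11 00"
 *	CDB="28 00 00 00 00 01 00 00 01 00"
 */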
/*
 * Sysctl to force an invalidation of the drive right now.  Can be
 * called with CTLFLAG_MPSAFE since we take periph lock.
 */
int
cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cam_periph *periph;
	int error, value;

	periph = arg1;
	value = 0;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL || value != 1)
		return (error);

	cam_periph_lock(periph);
	cam_periph_invalidate(periph);
	cam_periph_unlock(periph);

	return (error);
}
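
/*
 * Example (sketch): a driver can expose the handler above under its
 * own sysctl tree, e.g. as an "invalidate" node; the context and tree
 * names below are hypothetical:
 *
 *	SYSCTL_ADD_PROC(&softc->sysctl_ctx,
 *	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate",
 *	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, periph, 0,
 *	    cam_periph_invalidate_sysctl, "I",
 *	    "Write 1 to invalidate the drive immediately");
 */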