2 * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.c#5 $
33 * CTL kernel internal frontend target driver. This allows kernel-level
34 * clients to send commands into CTL.
36 * This has elements of a FETD (e.g. it has to set tag numbers, initiator,
37 * port, target, and LUN) and elements of an initiator (LUN discovery and
38 * probing, error recovery, command initiation). Even though this has some
39 * initiator type elements, this is not intended to be a full fledged
40 * initiator layer. It is only intended to send a limited number of
41 * commands to a well known target layer.
43 * To be able to fulfill the role of a full initiator layer, it would need
44 * a whole lot more functionality.
46 * Author: Ken Merry <ken@FreeBSD.org>
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/kernel.h>
56 #include <sys/types.h>
57 #include <sys/malloc.h>
58 #include <sys/module.h>
60 #include <sys/mutex.h>
61 #include <sys/condvar.h>
62 #include <sys/queue.h>
64 #include <sys/sysctl.h>
66 #include <cam/scsi/scsi_all.h>
67 #include <cam/scsi/scsi_da.h>
68 #include <cam/ctl/ctl_io.h>
69 #include <cam/ctl/ctl.h>
70 #include <cam/ctl/ctl_frontend.h>
71 #include <cam/ctl/ctl_frontend_internal.h>
72 #include <cam/ctl/ctl_backend.h>
73 #include <cam/ctl/ctl_ioctl.h>
74 #include <cam/ctl/ctl_util.h>
75 #include <cam/ctl/ctl_ha.h>
76 #include <cam/ctl/ctl_private.h>
77 #include <cam/ctl/ctl_debug.h>
78 #include <cam/ctl/ctl_scsi_all.h>
79 #include <cam/ctl/ctl_error.h>
83 * - overall metatask, different potential metatask types (e.g. forced
84 * shutdown, gentle shutdown)
85 * - forced shutdown metatask:
86 * - states: report luns, pending, done?
87 * - list of luns pending, with the relevant I/O for that lun attached.
88 * This would allow moving ahead on LUNs with no errors, and going
89 * into error recovery on LUNs with problems. Per-LUN states might
90 * include inquiry, stop/offline, done.
92 * Use LUN enable for LUN list instead of getting it manually? We'd still
93 * need inquiry data for each LUN.
95 * How to handle processor LUN w.r.t. found/stopped counts?
/*
 * NOTE(review): this file is a fragmentary extract -- each surviving line
 * carries its original line number and many intervening lines are missing.
 * Code below is byte-identical to the extract; only comments are added.
 *
 * Per-tasktype state for start/stop metatasks, the metatask descriptor
 * itself, and the error-action flag values returned by the error parsers.
 * CFI_ERR_MASK selects the base action; CFI_ERR_NO_DECREMENT is OR'd in to
 * request a retry without consuming a retry count (see cfi_done's use of
 * io_hdr.retries).
 */
104 struct cfi_task_startstop {
110 /* XXX KDM add more fields here */
114 struct cfi_task_startstop startstop;
/* Metatask: one kernel-client request (startup/shutdown/BBR read). */
117 struct cfi_metatask {
118 cfi_tasktype tasktype;
119 cfi_mt_status status;
120 union cfi_taskinfo taskinfo;
122 STAILQ_ENTRY(cfi_metatask) links;
/* Error-action values; low byte is the action, high bits are modifiers. */
127 CFI_ERR_RETRY = 0x000,
128 CFI_ERR_FAIL = 0x001,
129 CFI_ERR_LUN_RESET = 0x002,
130 CFI_ERR_MASK = 0x0ff,
131 CFI_ERR_NO_DECREMENT = 0x100
/*
 * NOTE(review): fragmentary extract -- struct/enum headers and several
 * members are missing between the numbered lines; code kept byte-identical.
 *
 * LUN probe states (inquiry -> read capacity [-> read capacity 16] ->
 * ready, per cfi_lun_probe_done), the per-LUN descriptor, the per-I/O
 * tracking structure stored in io_hdr.port_priv, and the frontend softc.
 */
141 CFI_LUN_READCAPACITY,
142 CFI_LUN_READCAPACITY_16,
/* Per-LUN state: identity, inquiry data, geometry, pending-I/O list. */
147 struct ctl_id target_id;
149 struct scsi_inquiry_data inq_data;
152 int blocksize_powerof2;
153 uint32_t cur_tag_num;
155 struct cfi_softc *softc;
156 STAILQ_HEAD(, cfi_lun_io) io_list;
157 STAILQ_ENTRY(cfi_lun) links;
/* Per-I/O bookkeeping; must fit in CTL_PORT_PRIV_SIZE (checked in init). */
162 struct cfi_metatask *metatask;
163 cfi_error_policy policy;
164 void (*done_function)(union ctl_io *io);
165 union ctl_io *ctl_io;
166 struct cfi_lun_io *orig_lun_io;
167 STAILQ_ENTRY(cfi_lun_io) links;
/* Frontend-wide softc: CTL port plus LUN and metatask lists. */
176 struct ctl_port port;
180 STAILQ_HEAD(, cfi_lun) lun_list;
181 STAILQ_HEAD(, cfi_metatask) metatask_list;
/*
 * NOTE(review): fragmentary extract; lines between the numbered survivors
 * are missing. Code kept byte-identical; comments only.
 *
 * Module-level state: malloc type for S/G list copies, UMA zones for LUN
 * and metatask allocations, the single softc instance, forward
 * declarations, and the ctl_frontend registration.
 */
184 MALLOC_DEFINE(M_CTL_CFI, "ctlcfi", "CTL CFI");
186 static uma_zone_t cfi_lun_zone;
187 static uma_zone_t cfi_metatask_zone;
189 static struct cfi_softc fetd_internal_softc;
/* Forward declarations for the port callbacks and internal helpers. */
192 void cfi_shutdown(void) __unused;
193 static void cfi_online(void *arg);
194 static void cfi_offline(void *arg);
195 static int cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
196 static int cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
197 static void cfi_datamove(union ctl_io *io);
198 static cfi_error_action cfi_checkcond_parse(union ctl_io *io,
199 struct cfi_lun_io *lun_io);
200 static cfi_error_action cfi_error_parse(union ctl_io *io,
201 struct cfi_lun_io *lun_io);
202 static void cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
203 struct cfi_metatask *metatask, cfi_error_policy policy,
204 int retries, struct cfi_lun_io *orig_lun_io,
205 void (*done_function)(union ctl_io *io));
206 static void cfi_done(union ctl_io *io);
207 static void cfi_lun_probe_done(union ctl_io *io);
208 static void cfi_lun_probe(struct cfi_lun *lun, int have_lock);
209 static void cfi_metatask_done(struct cfi_softc *softc,
210 struct cfi_metatask *metatask);
211 static void cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask,
213 static void cfi_metatask_io_done(union ctl_io *io);
214 static void cfi_err_recovery_done(union ctl_io *io);
215 static void cfi_lun_io_done(union ctl_io *io);
/* Frontend descriptor registered with CTL at load time. */
217 static struct ctl_frontend cfi_frontend =
221 .shutdown = cfi_shutdown,
223 CTL_FRONTEND_DECLARE(ctlcfi, cfi_frontend);
/*
 * Frontend initialization (function signature missing from this extract --
 * presumably cfi_init; TODO confirm against full source). Zeroes the
 * softc, sanity-checks that cfi_lun_io fits in the port-private area,
 * initializes the lock and lists, fills in the ctl_port callbacks, and
 * registers the port and UMA zones.
 */
228 struct cfi_softc *softc;
229 struct ctl_port *port;
232 softc = &fetd_internal_softc;
/* cfi_lun_io is stored in io_hdr.port_priv, so it must fit there. */
238 if (sizeof(struct cfi_lun_io) > CTL_PORT_PRIV_SIZE) {
239 printf("%s: size of struct cfi_lun_io %zd > "
240 "CTL_PORT_PRIV_SIZE %d\n", __func__,
241 sizeof(struct cfi_lun_io),
244 memset(softc, 0, sizeof(*softc));
246 mtx_init(&softc->lock, "CTL frontend mutex", NULL, MTX_DEF);
247 softc->flags |= CTL_FLAG_MASTER_SHELF;
249 STAILQ_INIT(&softc->lun_list);
250 STAILQ_INIT(&softc->metatask_list);
251 sprintf(softc->fe_name, "kernel");
/* Wire up the port callbacks that CTL will invoke on this frontend. */
252 port->frontend = &cfi_frontend;
253 port->port_type = CTL_PORT_INTERNAL;
254 port->num_requested_ctl_io = 100;
255 port->port_name = softc->fe_name;
256 port->port_online = cfi_online;
257 port->port_offline = cfi_offline;
258 port->onoff_arg = softc;
259 port->lun_enable = cfi_lun_enable;
260 port->lun_disable = cfi_lun_disable;
261 port->targ_lun_arg = softc;
262 port->fe_datamove = cfi_datamove;
263 port->fe_done = cfi_done;
264 port->max_targets = 15;
265 port->max_target_id = 15;
267 if (ctl_port_register(port, (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0)
269 printf("%s: internal frontend registration failed\n", __func__);
/* Zones for fixed-size LUN and metatask allocations. */
273 cfi_lun_zone = uma_zcreate("cfi_lun", sizeof(struct cfi_lun),
274 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
275 cfi_metatask_zone = uma_zcreate("cfi_metatask", sizeof(struct cfi_metatask),
276 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
/*
 * Frontend teardown (signature missing from this extract -- presumably
 * cfi_shutdown; TODO confirm). Deregisters the port and destroys the
 * UMA zones. Pending per-LUN I/O is acknowledged as unhandled (XXX below).
 */
284 struct cfi_softc *softc;
286 softc = &fetd_internal_softc;
289 * XXX KDM need to clear out any I/O pending on each LUN.
291 if (ctl_port_deregister(&softc->port) != 0)
292 printf("%s: ctl_frontend_deregister() failed\n", __func__);
294 uma_zdestroy(cfi_lun_zone);
295 uma_zdestroy(cfi_metatask_zone);
/*
 * Port-online callback: mark the frontend online and kick off a probe
 * (inquiry, etc.) for every LUN currently on the list, holding the softc
 * lock across the walk.
 */
299 cfi_online(void *arg)
301 struct cfi_softc *softc;
304 softc = (struct cfi_softc *)arg;
306 softc->flags |= CFI_ONLINE;
309 * Go through and kick off the probe for each lun. Should we check
310 * the LUN flags here to determine whether or not to probe it?
312 mtx_lock(&softc->lock);
313 STAILQ_FOREACH(lun, &softc->lun_list, links)
314 cfi_lun_probe(lun, /*have_lock*/ 1);
315 mtx_unlock(&softc->lock);
/* Port-offline callback: just clear the CFI_ONLINE flag. */
319 cfi_offline(void *arg)
321 struct cfi_softc *softc;
323 softc = (struct cfi_softc *)arg;
325 softc->flags &= ~CFI_ONLINE;
/*
 * LUN-enable callback: if we already track this target/LUN, do nothing;
 * otherwise allocate a cfi_lun, initialize it in CFI_LUN_INQUIRY state,
 * add it to the softc list, and start the probe sequence.
 */
329 cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
331 struct cfi_softc *softc;
335 softc = (struct cfi_softc *)arg;
/* Duplicate check under the softc lock. */
338 mtx_lock(&softc->lock);
339 STAILQ_FOREACH(lun, &softc->lun_list, links) {
340 if ((lun->target_id.id == target_id.id)
341 && (lun->lun_id == lun_id)) {
346 mtx_unlock(&softc->lock);
349 * If we already have this target/LUN, there is no reason to add
350 * it to our lists again.
/* M_NOWAIT: this can be called from a context where sleeping is unsafe. */
355 lun = uma_zalloc(cfi_lun_zone, M_NOWAIT | M_ZERO);
357 printf("%s: unable to allocate LUN structure\n", __func__);
361 lun->target_id = target_id;
362 lun->lun_id = lun_id;
363 lun->cur_tag_num = 0;
364 lun->state = CFI_LUN_INQUIRY;
366 STAILQ_INIT(&lun->io_list);
368 mtx_lock(&softc->lock);
369 STAILQ_INSERT_TAIL(&softc->lun_list, lun, links);
370 mtx_unlock(&softc->lock);
372 cfi_lun_probe(lun, /*have_lock*/ 0);
/*
 * LUN-disable callback: find the matching target/LUN, unlink it from the
 * softc list, and free it. CTL guarantees no I/O is pending on the LUN
 * when this notification arrives (see XXX comment below).
 */
378 cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
380 struct cfi_softc *softc;
384 softc = (struct cfi_softc *)arg;
389 * XXX KDM need to do an invalidate and then a free when any
390 * pending I/O has completed. Or do we? CTL won't free a LUN
391 * while any I/O is pending. So we won't get this notification
392 * unless any I/O we have pending on a LUN has completed.
394 mtx_lock(&softc->lock);
395 STAILQ_FOREACH(lun, &softc->lun_list, links) {
396 if ((lun->target_id.id == target_id.id)
397 && (lun->lun_id == lun_id)) {
403 STAILQ_REMOVE(&softc->lun_list, lun, cfi_lun, links);
405 mtx_unlock(&softc->lock);
/* Not found: report and (in full source, presumably) bail out. */
408 printf("%s: can't find target %ju lun %d\n", __func__,
409 (uintmax_t)target_id.id, lun_id);
413 uma_zfree(cfi_lun_zone, lun);
/*
 * Datamove callback: copy data between the caller's "external" buffer(s)
 * (ext_data_ptr, possibly an S/G list) and CTL's kernel buffer(s)
 * (kern_data_ptr / kern S/G list), resuming at ext_data_filled. Direction
 * is selected by CTL_FLAG_DATA_MASK. Calls be_move_done() when finished.
 *
 * NOTE(review): several lines (loop bodies, else branches, error paths)
 * are missing from this extract; comments describe only what is visible.
 */
419 cfi_datamove(union ctl_io *io)
421 struct ctl_sg_entry *ext_sglist, *kern_sglist;
422 struct ctl_sg_entry ext_entry, kern_entry;
423 int ext_sglen, ext_sg_entries, kern_sg_entries;
424 int ext_sg_start, ext_offset;
425 int len_to_copy, len_copied;
426 int kern_watermark, ext_watermark;
427 int ext_sglist_malloced;
428 struct ctl_scsiio *ctsio;
431 ext_sglist_malloced = 0;
436 CTL_DEBUG_PRINT(("%s\n", __func__));
441 * If this is the case, we're probably doing a BBR read and don't
442 * actually need to transfer the data. This will effectively
443 * bit-bucket the data.
445 if (ctsio->ext_data_ptr == NULL)
449 * To simplify things here, if we have a single buffer, stick it in
450 * a S/G entry and just make it a single entry S/G list.
452 if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
/* Caller passed an S/G list: copy it so we can walk it locally. */
455 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
457 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL_CFI,
459 ext_sglist_malloced = 1;
460 if (memcpy(ext_sglist, ctsio->ext_data_ptr, ext_sglen) != 0) {
461 ctl_set_internal_failure(ctsio,
/* Find the S/G entry and offset where ext_data_filled left off. */
466 ext_sg_entries = ctsio->ext_sg_entries;
468 for (i = 0; i < ext_sg_entries; i++) {
469 if ((len_seen + ext_sglist[i].len) >=
470 ctsio->ext_data_filled) {
472 ext_offset = ctsio->ext_data_filled - len_seen;
475 len_seen += ext_sglist[i].len;
/* Single flat buffer: fake a one-entry S/G list on the stack. */
478 ext_sglist = &ext_entry;
479 ext_sglist->addr = ctsio->ext_data_ptr;
480 ext_sglist->len = ctsio->ext_data_len;
483 ext_offset = ctsio->ext_data_filled;
486 if (ctsio->kern_sg_entries > 0) {
487 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
488 kern_sg_entries = ctsio->kern_sg_entries;
490 kern_sglist = &kern_entry;
491 kern_sglist->addr = ctsio->kern_data_ptr;
492 kern_sglist->len = ctsio->kern_data_len;
498 ext_watermark = ext_offset;
/* Walk both lists in lockstep, copying min(remaining, remaining). */
500 for (i = ext_sg_start, j = 0;
501 i < ext_sg_entries && j < kern_sg_entries;) {
502 uint8_t *ext_ptr, *kern_ptr;
504 len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
505 kern_sglist[j].len - kern_watermark);
507 ext_ptr = (uint8_t *)ext_sglist[i].addr;
508 ext_ptr = ext_ptr + ext_watermark;
509 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
/* Bus-address S/G entries are not supported here. */
513 panic("need to implement bus address support");
515 kern_ptr = bus_to_virt(kern_sglist[j].addr);
518 kern_ptr = (uint8_t *)kern_sglist[j].addr;
519 kern_ptr = kern_ptr + kern_watermark;
521 kern_watermark += len_to_copy;
522 ext_watermark += len_to_copy;
/* Direction: kernel->caller (read) vs. caller->kernel (write). */
524 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
526 CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
527 __func__, len_to_copy));
528 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
530 memcpy(ext_ptr, kern_ptr, len_to_copy);
532 CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
533 __func__, len_to_copy));
534 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
536 memcpy(kern_ptr, ext_ptr, len_to_copy);
539 len_copied += len_to_copy;
/* Advance to the next S/G entry once one side is exhausted. */
541 if (ext_sglist[i].len == ext_watermark) {
546 if (kern_sglist[j].len == kern_watermark) {
552 ctsio->ext_data_filled += len_copied;
554 CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
555 __func__, ext_sg_entries, kern_sg_entries));
556 CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
557 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
560 /* XXX KDM set residual?? */
563 if (ext_sglist_malloced != 0)
564 free(ext_sglist, M_CTL_CFI);
566 io->scsiio.be_move_done(io);
572 * For any sort of check condition, busy, etc., we just retry. We do not
573 * decrement the retry count for unit attention type errors. These are
574 * normal, and we want to save the retry count for "real" errors. Otherwise,
575 * we could end up with situations where a command will succeed in some
576 * situations and fail in others, depending on whether a unit attention is
577 * pending. Also, some of our error recovery actions, most notably the
578 * LUN reset action, will cause a unit attention.
580 * We can add more detail here later if necessary.
/*
 * Decide what to do about a CHECK CONDITION: default is retry; unit
 * attentions and deferred errors retry without decrementing the retry
 * count; generic hardware errors (44/00) and powered-down/offline
 * not-ready states (04/02, 04/03) fail outright.
 */
582 static cfi_error_action
583 cfi_checkcond_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
585 cfi_error_action error_action;
586 int error_code, sense_key, asc, ascq;
589 * Default to retrying the command.
591 error_action = CFI_ERR_RETRY;
/* Pull error code / sense key / ASC / ASCQ out of the sense data. */
593 scsi_extract_sense_len(&io->scsiio.sense_data,
594 io->scsiio.sense_len,
601 switch (error_code) {
602 case SSD_DEFERRED_ERROR:
603 case SSD_DESC_DEFERRED_ERROR:
604 error_action |= CFI_ERR_NO_DECREMENT;
606 case SSD_CURRENT_ERROR:
607 case SSD_DESC_CURRENT_ERROR:
610 case SSD_KEY_UNIT_ATTENTION:
/* Unit attentions are normal; don't burn a retry on them. */
611 error_action |= CFI_ERR_NO_DECREMENT;
613 case SSD_KEY_HARDWARE_ERROR:
615 * This is our generic "something bad happened"
616 * error code. It often isn't recoverable.
618 if ((asc == 0x44) && (ascq == 0x00))
619 error_action = CFI_ERR_FAIL;
621 case SSD_KEY_NOT_READY:
623 * If the LUN is powered down, there likely isn't
624 * much point in retrying right now.
626 if ((asc == 0x04) && (ascq == 0x02))
627 error_action = CFI_ERR_FAIL;
629 * If the LUN is offline, there probably isn't much
630 * point in retrying, either.
632 if ((asc == 0x04) && (ascq == 0x03))
633 error_action = CFI_ERR_FAIL;
639 return (error_action);
/*
 * Map a failed ctl_io to an error action. SCSI check conditions defer to
 * cfi_checkcond_parse(); reservation conflicts under the hard error policy
 * trigger a LUN reset (presumably -- the action line is missing from this
 * extract); everything else, including task I/O, defaults to retry.
 */
642 static cfi_error_action
643 cfi_error_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
645 cfi_error_action error_action;
647 error_action = CFI_ERR_RETRY;
649 switch (io->io_hdr.io_type) {
651 switch (io->io_hdr.status & CTL_STATUS_MASK) {
653 switch (io->scsiio.scsi_status) {
654 case SCSI_STATUS_RESERV_CONFLICT:
656 * For a reservation conflict, we'll usually
657 * want the hard error recovery policy, so
658 * we'll reset the LUN.
660 if (lun_io->policy == CFI_ERR_HARD)
667 case SCSI_STATUS_CHECK_COND:
669 error_action = cfi_checkcond_parse(io, lun_io);
674 error_action = CFI_ERR_RETRY;
680 * In theory task management commands shouldn't fail...
682 error_action = CFI_ERR_RETRY;
/* Unknown io_type: log, then panic -- this indicates corruption. */
685 printf("%s: invalid ctl_io type %d\n", __func__,
687 panic("%s: invalid ctl_io type %d\n", __func__,
692 return (error_action);
/*
 * Fill in the nexus (initiator 7, our port, the LUN's target/LUN ids),
 * retry count, and the cfi_lun_io bookkeeping stored in port_priv. SCSI
 * I/Os get the LUN's next tag number; task I/Os leave the tag to the
 * caller (only needed for aborts).
 */
696 cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
697 struct cfi_metatask *metatask, cfi_error_policy policy, int retries,
698 struct cfi_lun_io *orig_lun_io,
699 void (*done_function)(union ctl_io *io))
701 struct cfi_lun_io *lun_io;
703 io->io_hdr.nexus.initid.id = 7;
704 io->io_hdr.nexus.targ_port = lun->softc->port.targ_port;
705 io->io_hdr.nexus.targ_target.id = lun->target_id.id;
706 io->io_hdr.nexus.targ_lun = lun->lun_id;
707 io->io_hdr.retries = retries;
/* Per-I/O state lives in the port-private area of the header. */
708 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
709 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = lun_io;
711 lun_io->metatask = metatask;
713 lun_io->policy = policy;
714 lun_io->orig_lun_io = orig_lun_io;
715 lun_io->done_function = done_function;
717 * We only set the tag number for SCSI I/Os. For task management
718 * commands, the tag number is only really needed for aborts, so
719 * the caller can set it if necessary.
721 switch (io->io_hdr.io_type) {
723 io->scsiio.tag_num = lun->cur_tag_num++;
/*
 * I/O completion callback. On error with retries remaining, consult
 * cfi_error_parse(): either retry (clearing status and requeueing via
 * ctl_queue), launch a LUN-reset recovery task I/O, or fall through to
 * the per-I/O done_function. CFI_ERR_NO_DECREMENT retries for free.
 *
 * NOTE(review): extract is missing lines here (e.g. softc/lun assignment,
 * several case labels); comments reflect only the visible code.
 */
732 cfi_done(union ctl_io *io)
734 struct cfi_lun_io *lun_io;
735 struct cfi_softc *softc;
738 lun_io = (struct cfi_lun_io *)
739 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
745 * Very minimal retry logic. We basically retry if we got an error
746 * back, and the retry count is greater than 0. If we ever want
747 * more sophisticated initiator type behavior, the CAM error
748 * recovery code in ../common might be helpful.
750 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
751 && (io->io_hdr.retries > 0)) {
752 ctl_io_status old_status;
753 cfi_error_action error_action;
755 error_action = cfi_error_parse(io, lun_io);
757 switch (error_action & CFI_ERR_MASK) {
760 break; /* NOTREACHED */
761 case CFI_ERR_LUN_RESET: {
762 union ctl_io *new_io;
763 struct cfi_lun_io *new_lun_io;
/* Allocate and queue a LUN RESET task; original I/O is retried
 * from cfi_err_recovery_done once the reset completes. */
765 new_io = ctl_alloc_io(softc->port.ctl_pool_ref);
766 if (new_io == NULL) {
767 printf("%s: unable to allocate ctl_io for "
768 "error recovery\n", __func__);
773 new_io->io_hdr.io_type = CTL_IO_TASK;
774 new_io->taskio.task_action = CTL_TASK_LUN_RESET;
779 /*policy*/ CFI_ERR_SOFT,
781 /*orig_lun_io*/lun_io,
782 /*done_function*/ cfi_err_recovery_done);
785 new_lun_io = (struct cfi_lun_io *)
786 new_io->io_hdr.port_priv;
788 mtx_lock(&lun->softc->lock);
789 STAILQ_INSERT_TAIL(&lun->io_list, new_lun_io, links);
790 mtx_unlock(&lun->softc->lock);
/* Plain retry: optionally consume a retry, clear state, requeue. */
797 if ((error_action & CFI_ERR_NO_DECREMENT) == 0)
798 io->io_hdr.retries--;
802 old_status = io->io_hdr.status;
803 io->io_hdr.status = CTL_STATUS_NONE;
805 io->io_hdr.flags &= ~CTL_FLAG_ALREADY_DONE;
807 io->io_hdr.flags &= ~CTL_FLAG_ABORT;
808 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
810 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
811 printf("%s: error returned from ctl_queue()!\n",
813 io->io_hdr.status = old_status;
/* Normal completion (or retries exhausted): hand off to owner. */
818 lun_io->done_function(io);
/*
 * Probe-state machine completion handler. INQUIRY: non-disk LUNs go
 * straight to READY; disks advance to READ CAPACITY. READ CAPACITY /
 * READ CAPACITY(16): extract maxlba/blocksize, escalate to the 16-byte
 * command when maxlba saturates at 0xffffffff, reject blocksize 0, and
 * on READY compute num_blocks and (if power-of-two) blocksize_powerof2.
 *
 * NOTE(review): extract is missing lines (returns, frees, else arms);
 * comments describe the visible flow only.
 */
822 cfi_lun_probe_done(union ctl_io *io)
825 struct cfi_lun_io *lun_io;
827 lun_io = (struct cfi_lun_io *)
828 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
831 switch (lun->state) {
832 case CFI_LUN_INQUIRY: {
833 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
834 /* print out something here?? */
835 printf("%s: LUN %d probe failed because inquiry "
836 "failed\n", __func__, lun->lun_id);
837 ctl_io_error_print(io, NULL);
/* Only direct-access (disk) devices get a capacity probe. */
840 if (SID_TYPE(&lun->inq_data) != T_DIRECT) {
843 lun->state = CFI_LUN_READY;
844 ctl_scsi_path_string(io, path_str,
846 printf("%s", path_str);
847 scsi_print_inquiry(&lun->inq_data);
849 lun->state = CFI_LUN_READCAPACITY;
850 cfi_lun_probe(lun, /*have_lock*/ 0);
/* Done with this probe I/O; unlink it from the LUN's list. */
853 mtx_lock(&lun->softc->lock);
854 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
855 mtx_unlock(&lun->softc->lock);
859 case CFI_LUN_READCAPACITY:
860 case CFI_LUN_READCAPACITY_16: {
867 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
868 printf("%s: LUN %d probe failed because READ CAPACITY "
869 "failed\n", __func__, lun->lun_id);
870 ctl_io_error_print(io, NULL);
873 if (lun->state == CFI_LUN_READCAPACITY) {
874 struct scsi_read_capacity_data *rdcap;
876 rdcap = (struct scsi_read_capacity_data *)
877 io->scsiio.ext_data_ptr;
879 maxlba = scsi_4btoul(rdcap->addr);
880 blocksize = scsi_4btoul(rdcap->length);
881 if (blocksize == 0) {
882 printf("%s: LUN %d has invalid "
883 "blocksize 0, probe aborted\n",
884 __func__, lun->lun_id);
/* 0xffffffff means "too big": retry with READ CAPACITY(16). */
885 } else if (maxlba == 0xffffffff) {
886 lun->state = CFI_LUN_READCAPACITY_16;
887 cfi_lun_probe(lun, /*have_lock*/ 0);
889 lun->state = CFI_LUN_READY;
891 struct scsi_read_capacity_data_long *rdcap_long;
894 scsi_read_capacity_data_long *)
895 io->scsiio.ext_data_ptr;
896 maxlba = scsi_8btou64(rdcap_long->addr);
897 blocksize = scsi_4btoul(rdcap_long->length);
899 if (blocksize == 0) {
900 printf("%s: LUN %d has invalid "
901 "blocksize 0, probe aborted\n",
902 __func__, lun->lun_id);
904 lun->state = CFI_LUN_READY;
908 if (lun->state == CFI_LUN_READY) {
911 lun->num_blocks = maxlba + 1;
912 lun->blocksize = blocksize;
915 * If this is true, the blocksize is a power of 2.
916 * We already checked for 0 above.
918 if (((blocksize - 1) & blocksize) == 0) {
/* Find the set bit: blocksize == 1 << blocksize_powerof2. */
921 for (i = 0; i < 32; i++) {
922 if ((blocksize & (1 << i)) != 0) {
923 lun->blocksize_powerof2 = i;
928 ctl_scsi_path_string(io, path_str,sizeof(path_str));
929 printf("%s", path_str);
930 scsi_print_inquiry(&lun->inq_data);
931 printf("%s %ju blocks, blocksize %d\n", path_str,
932 (uintmax_t)maxlba + 1, blocksize);
/* Unlink the probe I/O and free the READ CAPACITY buffer. */
934 mtx_lock(&lun->softc->lock);
935 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
936 mtx_unlock(&lun->softc->lock);
937 free(io->scsiio.ext_data_ptr, M_CTL_CFI);
943 mtx_lock(&lun->softc->lock);
944 /* How did we get here?? */
945 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
946 mtx_unlock(&lun->softc->lock);
/*
 * Issue the next probe command for the LUN's current state: INQUIRY into
 * lun->inq_data, or READ CAPACITY / READ CAPACITY(16) into a freshly
 * malloc'd buffer. The I/O is tracked on lun->io_list and completes in
 * cfi_lun_probe_done (via cfi_init_io). have_lock says whether the caller
 * already holds the softc lock.
 *
 * NOTE(review): extract is missing lines (lock-conditional branches,
 * returns, some cfi_init_io arguments); comments cover visible code only.
 */
953 cfi_lun_probe(struct cfi_lun *lun, int have_lock)
/* Bail out if the port has gone offline. */
957 mtx_lock(&lun->softc->lock);
958 if ((lun->softc->flags & CFI_ONLINE) == 0) {
960 mtx_unlock(&lun->softc->lock);
964 mtx_unlock(&lun->softc->lock);
966 switch (lun->state) {
967 case CFI_LUN_INQUIRY: {
968 struct cfi_lun_io *lun_io;
971 io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
973 printf("%s: unable to alloc ctl_io for target %ju "
974 "lun %d probe\n", __func__,
975 (uintmax_t)lun->target_id.id, lun->lun_id);
/* Build the INQUIRY directly into the LUN's inq_data. */
979 /*data_ptr*/(uint8_t *)&lun->inq_data,
980 /*data_len*/ sizeof(lun->inq_data),
983 /*tag_type*/ CTL_TAG_SIMPLE,
989 /*policy*/ CFI_ERR_SOFT,
991 /*orig_lun_io*/ NULL,
995 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
998 mtx_lock(&lun->softc->lock);
999 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1001 mtx_unlock(&lun->softc->lock);
1003 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1004 printf("%s: error returned from ctl_queue()!\n",
1006 STAILQ_REMOVE(&lun->io_list, lun_io,
1012 case CFI_LUN_READCAPACITY:
1013 case CFI_LUN_READCAPACITY_16: {
1014 struct cfi_lun_io *lun_io;
1018 io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
1020 printf("%s: unable to alloc ctl_io for target %ju "
1021 "lun %d probe\n", __func__,
1022 (uintmax_t)lun->target_id.id, lun->lun_id);
/* Long-form buffer is large enough for either capacity command. */
1026 dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
1027 M_CTL_CFI, M_NOWAIT);
1028 if (dataptr == NULL) {
1029 printf("%s: unable to allocate SCSI read capacity "
1030 "buffer for target %ju lun %d\n", __func__,
1031 (uintmax_t)lun->target_id.id, lun->lun_id);
1034 if (lun->state == CFI_LUN_READCAPACITY) {
1035 ctl_scsi_read_capacity(io,
1036 /*data_ptr*/ dataptr,
1038 sizeof(struct scsi_read_capacity_data_long),
1042 /*tag_type*/ CTL_TAG_SIMPLE,
1045 ctl_scsi_read_capacity_16(io,
1046 /*data_ptr*/ dataptr,
1048 sizeof(struct scsi_read_capacity_data_long),
1052 /*tag_type*/ CTL_TAG_SIMPLE,
1058 /*policy*/ CFI_ERR_SOFT,
1060 /*orig_lun_io*/ NULL,
1061 /*done_function*/ cfi_lun_probe_done);
1063 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1066 mtx_lock(&lun->softc->lock);
1067 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1069 mtx_unlock(&lun->softc->lock);
1071 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1072 printf("%s: error returned from ctl_queue()!\n",
1074 STAILQ_REMOVE(&lun->io_list, lun_io,
1076 free(dataptr, M_CTL_CFI);
1083 /* Why were we called? */
/*
 * Unlink a finished metatask from the softc list and invoke the caller's
 * callback. The caller owns the metatask storage and must release it via
 * cfi_free_metatask after consuming the status.
 */
1089 cfi_metatask_done(struct cfi_softc *softc, struct cfi_metatask *metatask)
1091 mtx_lock(&softc->lock);
1092 STAILQ_REMOVE(&softc->metatask_list, metatask, cfi_metatask, links);
1093 mtx_unlock(&softc->lock);
1096 * Return status to the caller. Caller allocated storage, and is
1097 * responsible for calling cfi_free_metatask to release it once
1098 * they've seen the status.
1100 metatask->callback(metatask->callback_arg, metatask);
/*
 * Translate the completion status of a BBR-read I/O into the metatask's
 * status / bbrread.status enumeration: success, generic error, reservation
 * conflict, non-check-condition SCSI error, or -- from the sense data --
 * LUN stopped (04/02), LUN offline (04/03), or (under NEEDTOPORT) a
 * RAIDcore device-offline code decoded from the sense-key-specific bytes.
 *
 * NOTE(review): extract is missing returns/breaks and some else arms;
 * comments describe only what is visible.
 */
1104 cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask, union ctl_io *io)
1106 int error_code, sense_key, asc, ascq;
/* Only BBR-read metatasks are parsed here. */
1108 if (metatask->tasktype != CFI_TASK_BBRREAD)
1111 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
1112 metatask->status = CFI_MT_SUCCESS;
1113 metatask->taskinfo.bbrread.status = CFI_BBR_SUCCESS;
1117 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) {
1118 metatask->status = CFI_MT_ERROR;
1119 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
/* Preserve SCSI status and (truncated if needed) sense data for caller. */
1123 metatask->taskinfo.bbrread.scsi_status = io->scsiio.scsi_status;
1124 memcpy(&metatask->taskinfo.bbrread.sense_data, &io->scsiio.sense_data,
1125 ctl_min(sizeof(metatask->taskinfo.bbrread.sense_data),
1126 sizeof(io->scsiio.sense_data)));
1128 if (io->scsiio.scsi_status == SCSI_STATUS_RESERV_CONFLICT) {
1129 metatask->status = CFI_MT_ERROR;
1130 metatask->taskinfo.bbrread.status = CFI_BBR_RESERV_CONFLICT;
1134 if (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) {
1135 metatask->status = CFI_MT_ERROR;
1136 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1140 scsi_extract_sense_len(&io->scsiio.sense_data,
1141 io->scsiio.sense_len,
1148 switch (error_code) {
1149 case SSD_DEFERRED_ERROR:
1150 case SSD_DESC_DEFERRED_ERROR:
1151 metatask->status = CFI_MT_ERROR;
1152 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1154 case SSD_CURRENT_ERROR:
1155 case SSD_DESC_CURRENT_ERROR:
1157 struct scsi_sense_data *sense;
1159 sense = &io->scsiio.sense_data;
1161 if ((asc == 0x04) && (ascq == 0x02)) {
1162 metatask->status = CFI_MT_ERROR;
1163 metatask->taskinfo.bbrread.status = CFI_BBR_LUN_STOPPED;
1164 } else if ((asc == 0x04) && (ascq == 0x03)) {
1165 metatask->status = CFI_MT_ERROR;
1166 metatask->taskinfo.bbrread.status =
1167 CFI_BBR_LUN_OFFLINE_CTL;
1168 } else if ((asc == 0x44) && (ascq == 0x00)) {
/* Vendor-specific decode of sense-key-specific bytes (NEEDTOPORT). */
1170 if (sense->sense_key_spec[0] & SSD_SCS_VALID) {
1171 uint16_t retry_count;
1173 retry_count = sense->sense_key_spec[1] << 8 |
1174 sense->sense_key_spec[2];
1175 if (((retry_count & 0xf000) == CSC_RAIDCORE)
1176 && ((retry_count & 0x0f00) == CSC_SHELF_SW)
1177 && ((retry_count & 0xff) ==
1178 RC_STS_DEVICE_OFFLINE)) {
1179 metatask->status = CFI_MT_ERROR;
1180 metatask->taskinfo.bbrread.status =
1181 CFI_BBR_LUN_OFFLINE_RC;
1183 metatask->status = CFI_MT_ERROR;
1184 metatask->taskinfo.bbrread.status =
1188 #endif /* NEEDTOPORT */
1189 metatask->status = CFI_MT_ERROR;
1190 metatask->taskinfo.bbrread.status =
1196 metatask->status = CFI_MT_ERROR;
1197 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
/*
 * Per-I/O completion for metatask work. Start/stop: tally success/failure
 * per LUN under the softc lock, and when every LUN has reported, set the
 * overall metatask status and finish it. BBR read: translate the SCSI
 * result via cfi_metatask_bbr_errorparse and finish. In every path the
 * lun_io is unlinked from the LUN's io_list.
 *
 * NOTE(review): extract is missing lines (lun/softc assignments, frees,
 * some conditionals); comments describe visible code only.
 */
1205 cfi_metatask_io_done(union ctl_io *io)
1207 struct cfi_lun_io *lun_io;
1208 struct cfi_metatask *metatask;
1209 struct cfi_softc *softc;
1210 struct cfi_lun *lun;
1212 lun_io = (struct cfi_lun_io *)
1213 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1218 metatask = lun_io->metatask;
1220 switch (metatask->tasktype) {
1221 case CFI_TASK_STARTUP:
1222 case CFI_TASK_SHUTDOWN: {
1223 int failed, done, is_start;
1227 if (metatask->tasktype == CFI_TASK_STARTUP)
/* Count this LUN's outcome; "done" when all LUNs have reported. */
1232 mtx_lock(&softc->lock);
1233 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
1234 metatask->taskinfo.startstop.luns_complete++;
1236 metatask->taskinfo.startstop.luns_failed++;
1239 if ((metatask->taskinfo.startstop.luns_complete +
1240 metatask->taskinfo.startstop.luns_failed) >=
1241 metatask->taskinfo.startstop.total_luns)
1244 mtx_unlock(&softc->lock);
1247 printf("%s: LUN %d %s request failed\n", __func__,
1248 lun_io->lun->lun_id, (is_start == 1) ? "start" :
1250 ctl_io_error_print(io, &lun_io->lun->inq_data);
/* Any failed LUN makes the whole metatask an error. */
1253 if (metatask->taskinfo.startstop.luns_failed > 0)
1254 metatask->status = CFI_MT_ERROR;
1256 metatask->status = CFI_MT_SUCCESS;
1257 cfi_metatask_done(softc, metatask);
1259 mtx_lock(&softc->lock);
1260 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1261 mtx_unlock(&softc->lock);
1266 case CFI_TASK_BBRREAD: {
1268 * Translate the SCSI error into an enumeration.
1270 cfi_metatask_bbr_errorparse(metatask, io);
1272 mtx_lock(&softc->lock);
1273 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1274 mtx_unlock(&softc->lock);
1278 cfi_metatask_done(softc, metatask);
1283 * This shouldn't happen.
1285 mtx_lock(&softc->lock);
1286 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1287 mtx_unlock(&softc->lock);
/*
 * Completion of an error-recovery task I/O (e.g. the LUN reset queued by
 * cfi_done). Logs both the original error and the recovery result, then
 * retries the original command: decrement its retry count, clear status,
 * and requeue it. On ctl_queue failure the original I/O is unlinked and
 * freed.
 */
1295 cfi_err_recovery_done(union ctl_io *io)
1297 struct cfi_lun_io *lun_io, *orig_lun_io;
1298 struct cfi_lun *lun;
1299 union ctl_io *orig_io;
1301 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1302 orig_lun_io = lun_io->orig_lun_io;
1303 orig_io = orig_lun_io->ctl_io;
1306 if (io->io_hdr.status != CTL_SUCCESS) {
1307 printf("%s: error recovery action failed. Original "
1308 "error:\n", __func__);
1310 ctl_io_error_print(orig_lun_io->ctl_io, &lun->inq_data);
1312 printf("%s: error from error recovery action:\n", __func__);
1314 ctl_io_error_print(io, &lun->inq_data);
1316 printf("%s: trying original command again...\n", __func__);
/* Drop the recovery I/O's tracking entry before requeueing. */
1319 mtx_lock(&lun->softc->lock);
1320 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1321 mtx_unlock(&lun->softc->lock);
1324 orig_io->io_hdr.retries--;
1325 orig_io->io_hdr.status = CTL_STATUS_NONE;
1327 if (ctl_queue(orig_io) != CTL_RETVAL_COMPLETE) {
1328 printf("%s: error returned from ctl_queue()!\n", __func__);
1329 STAILQ_REMOVE(&lun->io_list, orig_lun_io,
1331 ctl_free_io(orig_io);
/*
 * Generic done_function for metatask-initiated LUN I/O: discard (unlink)
 * I/Os with no metatask attached, otherwise forward to
 * cfi_metatask_io_done for per-tasktype handling.
 */
1336 cfi_lun_io_done(union ctl_io *io)
1338 struct cfi_lun *lun;
1339 struct cfi_lun_io *lun_io;
1341 lun_io = (struct cfi_lun_io *)
1342 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1345 if (lun_io->metatask == NULL) {
1346 printf("%s: I/O has no metatask pointer, discarding\n",
1348 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1352 cfi_metatask_io_done(io);
/*
 * Public entry point: execute a metatask. Adds it to the softc list and,
 * if the port is offline, fails it immediately. STARTUP/SHUTDOWN:
 * pre-allocate one ctl_io per ready direct-access LUN on a temp list,
 * then issue a START STOP UNIT to each; total_luns is trimmed if
 * allocation or queueing falls short. BBRREAD: validate the LUN exists
 * and is ready, convert the byte length to blocks (shift when blocksize
 * is a power of two, division otherwise, rejecting non-multiples), and
 * issue a read via ctl_scsi_read_write.
 *
 * NOTE(review): extract is missing many lines (returns, unlock/continue
 * pairs, some argument lists); comments describe visible code only.
 */
1356 cfi_action(struct cfi_metatask *metatask)
1358 struct cfi_softc *softc;
1360 softc = &fetd_internal_softc;
1362 mtx_lock(&softc->lock);
1364 STAILQ_INSERT_TAIL(&softc->metatask_list, metatask, links);
1366 if ((softc->flags & CFI_ONLINE) == 0) {
1367 mtx_unlock(&softc->lock);
1368 metatask->status = CFI_MT_PORT_OFFLINE;
1369 cfi_metatask_done(softc, metatask);
1372 mtx_unlock(&softc->lock);
1374 switch (metatask->tasktype) {
1375 case CFI_TASK_STARTUP:
1376 case CFI_TASK_SHUTDOWN: {
1378 int da_luns, ios_allocated, do_start;
1379 struct cfi_lun *lun;
1380 STAILQ_HEAD(, ctl_io_hdr) tmp_io_list;
1384 STAILQ_INIT(&tmp_io_list);
1386 if (metatask->tasktype == CFI_TASK_STARTUP)
/* Pass 1: count ready disk LUNs and pre-allocate one I/O each. */
1391 mtx_lock(&softc->lock);
1392 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1393 if (lun->state != CFI_LUN_READY)
1396 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1399 io = ctl_alloc_io(softc->port.ctl_pool_ref);
1402 STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
1407 if (ios_allocated < da_luns) {
1408 printf("%s: error allocating ctl_io for %s\n",
1409 __func__, (do_start == 1) ? "startup" :
1411 da_luns = ios_allocated;
1414 metatask->taskinfo.startstop.total_luns = da_luns;
/* Pass 2: issue START STOP UNIT to each ready disk LUN. */
1416 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1417 struct cfi_lun_io *lun_io;
1419 if (lun->state != CFI_LUN_READY)
1422 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1425 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1429 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr, ctl_io_hdr,
1432 ctl_scsi_start_stop(io,
1436 /*power_conditions*/
1439 /*ctl_tag_type*/ CTL_TAG_ORDERED,
1444 /*metatask*/ metatask,
1445 /*policy*/ CFI_ERR_HARD,
1447 /*orig_lun_io*/ NULL,
1448 /*done_function*/ cfi_lun_io_done);
1450 lun_io = (struct cfi_lun_io *) io->io_hdr.port_priv;
1452 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1454 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1455 printf("%s: error returned from ctl_queue()!\n",
1457 STAILQ_REMOVE(&lun->io_list, lun_io,
/* Failed LUNs are dropped from the expected total. */
1460 metatask->taskinfo.startstop.total_luns--;
/* Leftover pre-allocated I/Os indicate a counting bug; free them. */
1464 if (STAILQ_FIRST(&tmp_io_list) != NULL) {
1465 printf("%s: error: tmp_io_list != NULL\n", __func__);
1466 for (io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1468 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list)) {
1469 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr,
1474 mtx_unlock(&softc->lock);
1478 case CFI_TASK_BBRREAD: {
1480 struct cfi_lun *lun;
1481 struct cfi_lun_io *lun_io;
1482 cfi_bbrread_status status;
1484 uint32_t num_blocks;
1486 status = CFI_BBR_SUCCESS;
1488 req_lun_num = metatask->taskinfo.bbrread.lun_num;
/* Locate the requested LUN; it must exist and be fully probed. */
1490 mtx_lock(&softc->lock);
1491 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1492 if (lun->lun_id != req_lun_num)
1494 if (lun->state != CFI_LUN_READY) {
1495 status = CFI_BBR_LUN_UNCONFIG;
1502 status = CFI_BBR_NO_LUN;
1504 if (status != CFI_BBR_SUCCESS) {
1505 metatask->status = CFI_MT_ERROR;
1506 metatask->taskinfo.bbrread.status = status;
1507 mtx_unlock(&softc->lock);
1508 cfi_metatask_done(softc, metatask);
1513 * Convert the number of bytes given into blocks and check
1514 * that the number of bytes is a multiple of the blocksize.
1515 * CTL will verify that the LBA is okay.
1517 if (lun->blocksize_powerof2 != 0) {
1518 if ((metatask->taskinfo.bbrread.len &
1519 (lun->blocksize - 1)) != 0) {
1520 metatask->status = CFI_MT_ERROR;
1521 metatask->taskinfo.bbrread.status =
1523 cfi_metatask_done(softc, metatask);
1527 num_blocks = metatask->taskinfo.bbrread.len >>
1528 lun->blocksize_powerof2;
1531 * XXX KDM this could result in floating point
1532 * division, which isn't supported in the kernel on
1535 if ((metatask->taskinfo.bbrread.len %
1536 lun->blocksize) != 0) {
1537 metatask->status = CFI_MT_ERROR;
1538 metatask->taskinfo.bbrread.status =
1540 cfi_metatask_done(softc, metatask);
1545 * XXX KDM this could result in floating point
1546 * division in some cases.
1548 num_blocks = metatask->taskinfo.bbrread.len /
1553 io = ctl_alloc_io(softc->port.ctl_pool_ref);
1555 metatask->status = CFI_MT_ERROR;
1556 metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
1557 mtx_unlock(&softc->lock);
1558 cfi_metatask_done(softc, metatask);
1563 * XXX KDM need to do a read capacity to get the blocksize
1566 ctl_scsi_read_write(io,
1568 /*data_len*/ metatask->taskinfo.bbrread.len,
1571 /*minimum_cdb_size*/ 0,
1572 /*lba*/ metatask->taskinfo.bbrread.lba,
1573 /*num_blocks*/ num_blocks,
1574 /*tag_type*/ CTL_TAG_SIMPLE,
1579 /*metatask*/ metatask,
1580 /*policy*/ CFI_ERR_SOFT,
1582 /*orig_lun_io*/ NULL,
1583 /*done_function*/ cfi_lun_io_done);
1585 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1587 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1589 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1590 printf("%s: error returned from ctl_queue()!\n",
1592 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1594 metatask->status = CFI_MT_ERROR;
1595 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1596 mtx_unlock(&softc->lock);
1597 cfi_metatask_done(softc, metatask);
1601 mtx_unlock(&softc->lock);
1605 panic("invalid metatask type %d", metatask->tasktype);
1606 break; /* NOTREACHED */
/*
 * Allocate a zeroed metatask from the UMA zone. can_wait selects
 * M_WAITOK vs. M_NOWAIT; returns NULL on allocation failure (M_NOWAIT
 * path). Status starts as CFI_MT_NONE.
 */
1610 struct cfi_metatask *
1611 cfi_alloc_metatask(int can_wait)
1613 struct cfi_metatask *metatask;
1614 struct cfi_softc *softc;
1616 softc = &fetd_internal_softc;
1618 metatask = uma_zalloc(cfi_metatask_zone,
1619 (can_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
1620 if (metatask == NULL)
1623 metatask->status = CFI_MT_NONE;
/* Release a metatask previously obtained from cfi_alloc_metatask. */
1629 cfi_free_metatask(struct cfi_metatask *metatask)
1632 uma_zfree(cfi_metatask_zone, metatask);