2 * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.c#5 $
33 * CTL kernel internal frontend target driver. This allows kernel-level
34 * clients to send commands into CTL.
36 * This has elements of a FETD (e.g. it has to set tag numbers, initiator,
37 * port, target, and LUN) and elements of an initiator (LUN discovery and
38 * probing, error recovery, command initiation). Even though this has some
39 * initiator type elements, this is not intended to be a full fledged
40 * initiator layer. It is only intended to send a limited number of
41 * commands to a well known target layer.
43 * To be able to fulfill the role of a full initiator layer, it would need
44 * a whole lot more functionality.
46 * Author: Ken Merry <ken@FreeBSD.org>
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/kernel.h>
56 #include <sys/types.h>
57 #include <sys/malloc.h>
58 #include <sys/module.h>
60 #include <sys/mutex.h>
61 #include <sys/condvar.h>
62 #include <sys/queue.h>
64 #include <sys/sysctl.h>
66 #include <cam/scsi/scsi_all.h>
67 #include <cam/scsi/scsi_da.h>
68 #include <cam/ctl/ctl_io.h>
69 #include <cam/ctl/ctl.h>
70 #include <cam/ctl/ctl_frontend.h>
71 #include <cam/ctl/ctl_frontend_internal.h>
72 #include <cam/ctl/ctl_backend.h>
73 #include <cam/ctl/ctl_ioctl.h>
74 #include <cam/ctl/ctl_util.h>
75 #include <cam/ctl/ctl_ha.h>
76 #include <cam/ctl/ctl_private.h>
77 #include <cam/ctl/ctl_debug.h>
78 #include <cam/ctl/ctl_scsi_all.h>
79 #include <cam/ctl/ctl_error.h>
83 * - overall metatask, different potential metatask types (e.g. forced
84 * shutdown, gentle shutdown)
85 * - forced shutdown metatask:
86 * - states: report luns, pending, done?
87 * - list of luns pending, with the relevant I/O for that lun attached.
88 * This would allow moving ahead on LUNs with no errors, and going
89 * into error recovery on LUNs with problems. Per-LUN states might
90 * include inquiry, stop/offline, done.
92 * Use LUN enable for LUN list instead of getting it manually? We'd still
93 * need inquiry data for each LUN.
95 * How to handle processor LUN w.r.t. found/stopped counts?
/*
 * NOTE(review): this listing is an incomplete extract — struct/enum headers
 * and closing braces are missing between the visible lines.  Comments below
 * describe only what is visible and hedge everything else.
 */
/* Per-metatask bookkeeping for start/stop (startup/shutdown) operations. */
104 struct cfi_task_startstop {
110 /* XXX KDM add more fields here */
/* presumably a member of a union cfi_taskinfo — TODO confirm against full source */
114 struct cfi_task_startstop startstop;
/*
 * One aggregate operation ("metatask") submitted by a kernel-level caller;
 * linked on the softc's metatask_list via 'links'.
 */
117 struct cfi_metatask {
118 cfi_tasktype tasktype;
119 cfi_mt_status status;
120 union cfi_taskinfo taskinfo;
122 STAILQ_ENTRY(cfi_metatask) links;
/*
 * Error-recovery action bits: low byte selects the action (retry/fail/
 * LUN reset), CFI_ERR_NO_DECREMENT is OR-ed in to retry without consuming
 * a retry count (used for deferred errors / unit attentions — see
 * cfi_checkcond_parse/cfi_done below).
 */
127 CFI_ERR_RETRY = 0x000,
128 CFI_ERR_FAIL = 0x001,
129 CFI_ERR_LUN_RESET = 0x002,
130 CFI_ERR_MASK = 0x0ff,
131 CFI_ERR_NO_DECREMENT = 0x100
/* LUN probe state machine states (INQUIRY -> READCAPACITY[_16] -> READY). */
141 CFI_LUN_READCAPACITY,
142 CFI_LUN_READCAPACITY_16,
/*
 * Per-LUN state: identity, cached inquiry data, capacity-derived fields,
 * a monotonically increasing tag number, and the list of I/Os outstanding
 * against this LUN (io_list, protected by softc->lock — see users below).
 */
147 struct ctl_id target_id;
149 struct scsi_inquiry_data inq_data;
152 int blocksize_powerof2;
153 uint32_t cur_tag_num;
155 struct cfi_softc *softc;
156 STAILQ_HEAD(, cfi_lun_io) io_list;
157 STAILQ_ENTRY(cfi_lun) links;
/*
 * Per-I/O tracking structure, stored in the ctl_io's port_priv area
 * (cfi_init checks it fits in CTL_PORT_PRIV_SIZE).  orig_lun_io links an
 * error-recovery I/O back to the command it is recovering.
 */
162 struct cfi_metatask *metatask;
163 cfi_error_policy policy;
164 void (*done_function)(union ctl_io *io);
165 union ctl_io *ctl_io;
166 struct cfi_lun_io *orig_lun_io;
167 STAILQ_ENTRY(cfi_lun_io) links;
/* Frontend softc: embedded ctl_frontend plus LUN and metatask lists. */
176 struct ctl_frontend fe;
180 STAILQ_HEAD(, cfi_lun) lun_list;
181 STAILQ_HEAD(, cfi_metatask) metatask_list;
/* Malloc tag for this frontend's temporary buffers (S/G lists, READ CAPACITY data). */
184 MALLOC_DEFINE(M_CTL_CFI, "ctlcfi", "CTL CFI");
/* UMA zones for per-LUN and per-metatask allocations; created in cfi_init. */
186 static uma_zone_t cfi_lun_zone;
187 static uma_zone_t cfi_metatask_zone;
/* Singleton softc for the one internal frontend instance. */
189 static struct cfi_softc fetd_internal_softc;
/* Global "CTL disabled" tunable defined elsewhere; checked in cfi_init. */
190 extern int ctl_disable;
193 void cfi_shutdown(void) __unused;
194 static void cfi_online(void *arg);
195 static void cfi_offline(void *arg);
196 static int cfi_targ_enable(void *arg, struct ctl_id targ_id);
197 static int cfi_targ_disable(void *arg, struct ctl_id targ_id);
198 static int cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
199 static int cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
200 static void cfi_datamove(union ctl_io *io);
201 static cfi_error_action cfi_checkcond_parse(union ctl_io *io,
202 struct cfi_lun_io *lun_io);
203 static cfi_error_action cfi_error_parse(union ctl_io *io,
204 struct cfi_lun_io *lun_io);
205 static void cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
206 struct cfi_metatask *metatask, cfi_error_policy policy,
207 int retries, struct cfi_lun_io *orig_lun_io,
208 void (*done_function)(union ctl_io *io));
209 static void cfi_done(union ctl_io *io);
210 static void cfi_lun_probe_done(union ctl_io *io);
211 static void cfi_lun_probe(struct cfi_lun *lun, int have_lock);
212 static void cfi_metatask_done(struct cfi_softc *softc,
213 struct cfi_metatask *metatask);
214 static void cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask,
216 static void cfi_metatask_io_done(union ctl_io *io);
217 static void cfi_err_recovery_done(union ctl_io *io);
218 static void cfi_lun_io_done(union ctl_io *io);
220 static int cfi_module_event_handler(module_t, int /*modeventtype_t*/, void *);
222 static moduledata_t cfi_moduledata = {
224 cfi_module_event_handler,
228 DECLARE_MODULE(ctlcfi, cfi_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
229 MODULE_VERSION(ctlcfi, 1);
230 MODULE_DEPEND(ctlcfi, ctl, 1, 1, 1);
/*
 * cfi_init (visible fragment — the function header line is missing from this
 * extract): initialize the singleton internal-frontend softc, fill in the
 * ctl_frontend callback table, register with CTL, and create the UMA zones
 * used for LUN and metatask objects.
 */
235 struct cfi_softc *softc;
236 struct ctl_frontend *fe;
239 softc = &fetd_internal_softc;
245 /* If we're disabled, don't initialize */
246 if (ctl_disable != 0)
/*
 * cfi_lun_io lives in the ctl_io's port_priv area, so it must fit;
 * otherwise this frontend cannot work at all.
 */
249 if (sizeof(struct cfi_lun_io) > CTL_PORT_PRIV_SIZE) {
250 printf("%s: size of struct cfi_lun_io %zd > "
251 "CTL_PORT_PRIV_SIZE %d\n", __func__,
252 sizeof(struct cfi_lun_io),
255 memset(softc, 0, sizeof(*softc));
257 mtx_init(&softc->lock, "CTL frontend mutex", NULL, MTX_DEF);
258 softc->flags |= CTL_FLAG_MASTER_SHELF;
260 STAILQ_INIT(&softc->lun_list);
261 STAILQ_INIT(&softc->metatask_list);
262 sprintf(softc->fe_name, "CTL internal");
/* Wire up the frontend callback table consumed by CTL core. */
263 fe->port_type = CTL_PORT_INTERNAL;
264 fe->num_requested_ctl_io = 100;
265 fe->port_name = softc->fe_name;
266 fe->port_online = cfi_online;
267 fe->port_offline = cfi_offline;
268 fe->onoff_arg = softc;
269 fe->targ_enable = cfi_targ_enable;
270 fe->targ_disable = cfi_targ_disable;
271 fe->lun_enable = cfi_lun_enable;
272 fe->lun_disable = cfi_lun_disable;
273 fe->targ_lun_arg = softc;
274 fe->fe_datamove = cfi_datamove;
275 fe->fe_done = cfi_done;
276 fe->max_targets = 15;
277 fe->max_target_id = 15;
279 if (ctl_frontend_register(fe, (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0)
281 printf("%s: internal frontend registration failed\n", __func__);
/* Zones have no ctor/dtor/init/fini — plain allocations. */
285 cfi_lun_zone = uma_zcreate("cfi_lun", sizeof(struct cfi_lun),
286 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
287 cfi_metatask_zone = uma_zcreate("cfi_metatask", sizeof(struct cfi_metatask),
288 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
/*
 * cfi_shutdown (visible fragment): deregister the frontend from CTL and tear
 * down the UMA zones.  Pending per-LUN I/O is acknowledged as an open issue
 * (XXX below) rather than handled.
 */
296 struct cfi_softc *softc;
298 softc = &fetd_internal_softc;
301 * XXX KDM need to clear out any I/O pending on each LUN.
303 if (ctl_frontend_deregister(&softc->fe) != 0)
304 printf("%s: ctl_frontend_deregister() failed\n", __func__);
306 uma_zdestroy(cfi_lun_zone);
307 uma_zdestroy(cfi_metatask_zone);
311 cfi_module_event_handler(module_t mod, int what, void *arg)
/*
 * Port-online callback: mark the frontend online and kick off a probe of
 * every known LUN.  The probes are started with softc->lock held
 * (have_lock == 1).
 */
325 cfi_online(void *arg)
327 struct cfi_softc *softc;
330 softc = (struct cfi_softc *)arg;
332 softc->flags |= CFI_ONLINE;
335 * Go through and kick off the probe for each lun. Should we check
336 * the LUN flags here to determine whether or not to probe it?
338 mtx_lock(&softc->lock);
339 STAILQ_FOREACH(lun, &softc->lun_list, links)
340 cfi_lun_probe(lun, /*have_lock*/ 1);
341 mtx_unlock(&softc->lock);
/* Port-offline callback: simply clear the online flag; no I/O is drained here. */
345 cfi_offline(void *arg)
347 struct cfi_softc *softc;
349 softc = (struct cfi_softc *)arg;
351 softc->flags &= ~CFI_ONLINE;
355 cfi_targ_enable(void *arg, struct ctl_id targ_id)
361 cfi_targ_disable(void *arg, struct ctl_id targ_id)
/*
 * LUN-enable callback: if this target/LUN pair is not already tracked,
 * allocate a cfi_lun, initialize it in the INQUIRY probe state, insert it
 * on the softc's lun_list, and start probing it (without the lock held).
 */
367 cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
369 struct cfi_softc *softc;
373 softc = (struct cfi_softc *)arg;
/* First pass: look for an existing entry for this target/LUN. */
376 mtx_lock(&softc->lock);
377 STAILQ_FOREACH(lun, &softc->lun_list, links) {
378 if ((lun->target_id.id == target_id.id)
379 && (lun->lun_id == lun_id)) {
384 mtx_unlock(&softc->lock);
387 * If we already have this target/LUN, there is no reason to add
388 * it to our lists again.
/* M_NOWAIT: this callback may not be allowed to sleep — allocation can fail. */
393 lun = uma_zalloc(cfi_lun_zone, M_NOWAIT | M_ZERO);
395 printf("%s: unable to allocate LUN structure\n", __func__);
399 lun->target_id = target_id;
400 lun->lun_id = lun_id;
401 lun->cur_tag_num = 0;
402 lun->state = CFI_LUN_INQUIRY;
404 STAILQ_INIT(&lun->io_list);
406 mtx_lock(&softc->lock);
407 STAILQ_INSERT_TAIL(&softc->lun_list, lun, links);
408 mtx_unlock(&softc->lock);
410 cfi_lun_probe(lun, /*have_lock*/ 0);
/*
 * LUN-disable callback: find the matching cfi_lun, unlink it from the
 * softc's list, and free it.  Relies on CTL's guarantee (per the XXX
 * comment) that no I/O is pending when this notification arrives.
 */
416 cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
418 struct cfi_softc *softc;
422 softc = (struct cfi_softc *)arg;
427 * XXX KDM need to do an invalidate and then a free when any
428 * pending I/O has completed. Or do we? CTL won't free a LUN
429 * while any I/O is pending. So we won't get this notification
430 * unless any I/O we have pending on a LUN has completed.
432 mtx_lock(&softc->lock);
433 STAILQ_FOREACH(lun, &softc->lun_list, links) {
434 if ((lun->target_id.id == target_id.id)
435 && (lun->lun_id == lun_id)) {
441 STAILQ_REMOVE(&softc->lun_list, lun, cfi_lun, links);
443 mtx_unlock(&softc->lock);
/* Not finding the LUN is unexpected — log it (error path, presumably returns). */
446 printf("%s: can't find target %ju lun %d\n", __func__,
447 (uintmax_t)target_id.id, lun_id);
451 uma_zfree(cfi_lun_zone, lun);
/*
 * Datamove callback: copy data between the caller's ("external") buffer or
 * S/G list and CTL's kernel S/G list, honoring ext_data_filled as the resume
 * offset.  Direction is chosen by CTL_FLAG_DATA_MASK.  Always finishes by
 * calling be_move_done().
 */
457 cfi_datamove(union ctl_io *io)
459 struct ctl_sg_entry *ext_sglist, *kern_sglist;
460 struct ctl_sg_entry ext_entry, kern_entry;
461 int ext_sglen, ext_sg_entries, kern_sg_entries;
462 int ext_sg_start, ext_offset;
463 int len_to_copy, len_copied;
464 int kern_watermark, ext_watermark;
465 int ext_sglist_malloced;
466 struct ctl_scsiio *ctsio;
469 ext_sglist_malloced = 0;
474 CTL_DEBUG_PRINT(("%s\n", __func__));
479 * If this is the case, we're probably doing a BBR read and don't
480 * actually need to transfer the data. This will effectively
481 * bit-bucket the data.
483 if (ctsio->ext_data_ptr == NULL)
487 * To simplify things here, if we have a single buffer, stick it in
488 * a S/G entry and just make it a single entry S/G list.
490 if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
/* Caller supplied a real S/G list: copy it into a private buffer. */
493 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
495 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL_CFI,
497 ext_sglist_malloced = 1;
/*
 * NOTE(review): memcpy() returns its destination pointer, never 0, so
 * this condition looks wrong — it appears copied from a copyin()-style
 * check.  Verify against the full source / CTL ioctl frontend.
 */
498 if (memcpy(ext_sglist, ctsio->ext_data_ptr, ext_sglen) != 0) {
499 ctl_set_internal_failure(ctsio,
504 ext_sg_entries = ctsio->ext_sg_entries;
/* Walk the external list to find the entry/offset where ext_data_filled falls. */
506 for (i = 0; i < ext_sg_entries; i++) {
507 if ((len_seen + ext_sglist[i].len) >=
508 ctsio->ext_data_filled) {
510 ext_offset = ctsio->ext_data_filled - len_seen;
513 len_seen += ext_sglist[i].len;
/* Single flat buffer: fake up a one-entry S/G list on the stack. */
516 ext_sglist = &ext_entry;
517 ext_sglist->addr = ctsio->ext_data_ptr;
518 ext_sglist->len = ctsio->ext_data_len;
521 ext_offset = ctsio->ext_data_filled;
/* Same single-entry trick for the kernel side when it isn't an S/G list. */
524 if (ctsio->kern_sg_entries > 0) {
525 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
526 kern_sg_entries = ctsio->kern_sg_entries;
528 kern_sglist = &kern_entry;
529 kern_sglist->addr = ctsio->kern_data_ptr;
530 kern_sglist->len = ctsio->kern_data_len;
536 ext_watermark = ext_offset;
/*
 * Main copy loop: advance through both lists in lock-step, copying the
 * smaller of the two remaining segment lengths each iteration.
 */
538 for (i = ext_sg_start, j = 0;
539 i < ext_sg_entries && j < kern_sg_entries;) {
540 uint8_t *ext_ptr, *kern_ptr;
542 len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
543 kern_sglist[j].len - kern_watermark);
545 ext_ptr = (uint8_t *)ext_sglist[i].addr;
546 ext_ptr = ext_ptr + ext_watermark;
547 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
/* Bus-address S/G entries are not supported by this frontend. */
551 panic("need to implement bus address support");
553 kern_ptr = bus_to_virt(kern_sglist[j].addr);
556 kern_ptr = (uint8_t *)kern_sglist[j].addr;
557 kern_ptr = kern_ptr + kern_watermark;
559 kern_watermark += len_to_copy;
560 ext_watermark += len_to_copy;
/* DATA_IN: kernel -> caller; otherwise caller -> kernel. */
562 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
564 CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
565 __func__, len_to_copy));
566 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
568 memcpy(ext_ptr, kern_ptr, len_to_copy);
570 CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
571 __func__, len_to_copy));
572 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
574 memcpy(kern_ptr, ext_ptr, len_to_copy);
577 len_copied += len_to_copy;
/* A watermark reaching its segment length means that segment is exhausted. */
579 if (ext_sglist[i].len == ext_watermark) {
584 if (kern_sglist[j].len == kern_watermark) {
590 ctsio->ext_data_filled += len_copied;
592 CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
593 __func__, ext_sg_entries, kern_sg_entries));
594 CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
595 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
598 /* XXX KDM set residual?? */
601 if (ext_sglist_malloced != 0)
602 free(ext_sglist, M_CTL_CFI);
604 io->scsiio.be_move_done(io);
610 * For any sort of check condition, busy, etc., we just retry. We do not
611 * decrement the retry count for unit attention type errors. These are
612 * normal, and we want to save the retry count for "real" errors. Otherwise,
613 * we could end up with situations where a command will succeed in some
614 * situations and fail in others, depending on whether a unit attention is
615 * pending. Also, some of our error recovery actions, most notably the
616 * LUN reset action, will cause a unit attention.
618 * We can add more detail here later if necessary.
/*
 * Map the sense data of a failed command to a cfi_error_action.  Default is
 * CFI_ERR_RETRY; specific ASC/ASCQ combinations (hardware error 44/00, LUN
 * powered down 04/02, LUN offline 04/03) are treated as unrecoverable.
 */
620 static cfi_error_action
621 cfi_checkcond_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
623 cfi_error_action error_action;
624 int error_code, sense_key, asc, ascq;
627 * Default to retrying the command.
629 error_action = CFI_ERR_RETRY;
631 scsi_extract_sense_len(&io->scsiio.sense_data,
632 io->scsiio.sense_len,
639 switch (error_code) {
/* Deferred errors: retry without spending a retry count. */
640 case SSD_DEFERRED_ERROR:
641 case SSD_DESC_DEFERRED_ERROR:
642 error_action |= CFI_ERR_NO_DECREMENT;
644 case SSD_CURRENT_ERROR:
645 case SSD_DESC_CURRENT_ERROR:
/* Unit attentions are expected (e.g. after our own LUN reset) — free retry. */
648 case SSD_KEY_UNIT_ATTENTION:
649 error_action |= CFI_ERR_NO_DECREMENT;
651 case SSD_KEY_HARDWARE_ERROR:
653 * This is our generic "something bad happened"
654 * error code. It often isn't recoverable.
656 if ((asc == 0x44) && (ascq == 0x00))
657 error_action = CFI_ERR_FAIL;
659 case SSD_KEY_NOT_READY:
661 * If the LUN is powered down, there likely isn't
662 * much point in retrying right now.
664 if ((asc == 0x04) && (ascq == 0x02))
665 error_action = CFI_ERR_FAIL;
667 * If the LUN is offline, there probably isn't much
668 * point in retrying, either.
670 if ((asc == 0x04) && (ascq == 0x03))
671 error_action = CFI_ERR_FAIL;
677 return (error_action);
/*
 * Top-level error triage for a completed I/O: for SCSI errors, dispatch on
 * scsi_status (reservation conflict may escalate to LUN reset under the
 * hard-error policy; check conditions go to cfi_checkcond_parse); task I/Os
 * simply retry; any other io_type is a programming error and panics.
 */
680 static cfi_error_action
681 cfi_error_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
683 cfi_error_action error_action;
685 error_action = CFI_ERR_RETRY;
687 switch (io->io_hdr.io_type) {
689 switch (io->io_hdr.status & CTL_STATUS_MASK) {
691 switch (io->scsiio.scsi_status) {
692 case SCSI_STATUS_RESERV_CONFLICT:
694 * For a reservation conflict, we'll usually
695 * want the hard error recovery policy, so
696 * we'll reset the LUN.
698 if (lun_io->policy == CFI_ERR_HARD)
705 case SCSI_STATUS_CHECK_COND:
707 error_action = cfi_checkcond_parse(io, lun_io);
712 error_action = CFI_ERR_RETRY;
718 * In theory task management commands shouldn't fail...
720 error_action = CFI_ERR_RETRY;
/* Unknown io_type: log, then panic — this indicates a caller bug. */
723 printf("%s: invalid ctl_io type %d\n", __func__,
725 panic("%s: invalid ctl_io type %d\n", __func__,
730 return (error_action);
/*
 * Fill in the nexus, retry count, and the cfi_lun_io tracking structure
 * (stored in the ctl_io's port_priv area) for an I/O this frontend is about
 * to submit.  Tag numbers are assigned only for SCSI I/Os; task-management
 * callers set their own tag if they need one (e.g. for aborts).
 */
734 cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
735 struct cfi_metatask *metatask, cfi_error_policy policy, int retries,
736 struct cfi_lun_io *orig_lun_io,
737 void (*done_function)(union ctl_io *io))
739 struct cfi_lun_io *lun_io;
/* Fixed initiator ID 7 — this frontend acts as a single well-known initiator. */
741 io->io_hdr.nexus.initid.id = 7;
742 io->io_hdr.nexus.targ_port = lun->softc->fe.targ_port;
743 io->io_hdr.nexus.targ_target.id = lun->target_id.id;
744 io->io_hdr.nexus.targ_lun = lun->lun_id;
745 io->io_hdr.retries = retries;
746 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
747 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = lun_io;
749 lun_io->metatask = metatask;
751 lun_io->policy = policy;
752 lun_io->orig_lun_io = orig_lun_io;
753 lun_io->done_function = done_function;
755 * We only set the tag number for SCSI I/Os. For task management
756 * commands, the tag number is only really needed for aborts, so
757 * the caller can set it if necessary.
759 switch (io->io_hdr.io_type) {
761 io->scsiio.tag_num = lun->cur_tag_num++;
/*
 * fe_done callback: completion point for every I/O this frontend submitted.
 * Implements minimal retry logic — on error with retries remaining, either
 * fail, launch a LUN-reset recovery I/O, or resubmit the original command
 * (possibly without decrementing retries).  Otherwise hand the I/O to its
 * per-I/O done_function.
 */
770 cfi_done(union ctl_io *io)
772 struct cfi_lun_io *lun_io;
773 struct cfi_softc *softc;
776 lun_io = (struct cfi_lun_io *)
777 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
783 * Very minimal retry logic. We basically retry if we got an error
784 * back, and the retry count is greater than 0. If we ever want
785 * more sophisticated initiator type behavior, the CAM error
786 * recovery code in ../common might be helpful.
788 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
789 && (io->io_hdr.retries > 0)) {
790 ctl_io_status old_status;
791 cfi_error_action error_action;
793 error_action = cfi_error_parse(io, lun_io);
795 switch (error_action & CFI_ERR_MASK) {
798 break; /* NOTREACHED */
799 case CFI_ERR_LUN_RESET: {
800 union ctl_io *new_io;
801 struct cfi_lun_io *new_lun_io;
/* Allocate a recovery I/O; on failure, fall through to plain completion. */
803 new_io = ctl_alloc_io(softc->fe.ctl_pool_ref);
804 if (new_io == NULL) {
805 printf("%s: unable to allocate ctl_io for "
806 "error recovery\n", __func__);
/* Build a LUN RESET task I/O whose completion resubmits the original. */
811 new_io->io_hdr.io_type = CTL_IO_TASK;
812 new_io->taskio.task_action = CTL_TASK_LUN_RESET;
817 /*policy*/ CFI_ERR_SOFT,
819 /*orig_lun_io*/lun_io,
820 /*done_function*/ cfi_err_recovery_done);
823 new_lun_io = (struct cfi_lun_io *)
824 new_io->io_hdr.port_priv;
826 mtx_lock(&lun->softc->lock);
827 STAILQ_INSERT_TAIL(&lun->io_list, new_lun_io, links);
828 mtx_unlock(&lun->softc->lock);
/* Plain retry path: optionally consume a retry, clear status/flags, requeue. */
835 if ((error_action & CFI_ERR_NO_DECREMENT) == 0)
836 io->io_hdr.retries--;
840 old_status = io->io_hdr.status;
841 io->io_hdr.status = CTL_STATUS_NONE;
843 io->io_hdr.flags &= ~CTL_FLAG_ALREADY_DONE;
845 io->io_hdr.flags &= ~CTL_FLAG_ABORT;
846 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
848 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
849 printf("%s: error returned from ctl_queue()!\n",
/* Requeue failed: restore the original error status before completing. */
851 io->io_hdr.status = old_status;
856 lun_io->done_function(io);
/*
 * Completion handler for LUN probe I/Os, driving the probe state machine:
 * INQUIRY -> (direct-access devices only) READ CAPACITY -> READ CAPACITY(16)
 * if maxlba saturates 32 bits -> READY.  On READY, capacity and blocksize
 * (plus its log2 when it is a power of two) are cached in the cfi_lun.
 */
860 cfi_lun_probe_done(union ctl_io *io)
863 struct cfi_lun_io *lun_io;
865 lun_io = (struct cfi_lun_io *)
866 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
869 switch (lun->state) {
870 case CFI_LUN_INQUIRY: {
871 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
872 /* print out something here?? */
873 printf("%s: LUN %d probe failed because inquiry "
874 "failed\n", __func__, lun->lun_id);
875 ctl_io_error_print(io, NULL);
/* Non-disk devices skip READ CAPACITY and go straight to READY. */
878 if (SID_TYPE(&lun->inq_data) != T_DIRECT) {
881 lun->state = CFI_LUN_READY;
882 ctl_scsi_path_string(io, path_str,
884 printf("%s", path_str);
885 scsi_print_inquiry(&lun->inq_data);
887 lun->state = CFI_LUN_READCAPACITY;
888 cfi_lun_probe(lun, /*have_lock*/ 0);
/* Done with this probe I/O — unlink it from the LUN's I/O list. */
891 mtx_lock(&lun->softc->lock);
892 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
893 mtx_unlock(&lun->softc->lock);
897 case CFI_LUN_READCAPACITY:
898 case CFI_LUN_READCAPACITY_16: {
905 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
906 printf("%s: LUN %d probe failed because READ CAPACITY "
907 "failed\n", __func__, lun->lun_id);
908 ctl_io_error_print(io, NULL);
911 if (lun->state == CFI_LUN_READCAPACITY) {
912 struct scsi_read_capacity_data *rdcap;
914 rdcap = (struct scsi_read_capacity_data *)
915 io->scsiio.ext_data_ptr;
917 maxlba = scsi_4btoul(rdcap->addr);
918 blocksize = scsi_4btoul(rdcap->length);
919 if (blocksize == 0) {
920 printf("%s: LUN %d has invalid "
921 "blocksize 0, probe aborted\n",
922 __func__, lun->lun_id);
/* maxlba of all-ones means the device needs READ CAPACITY(16). */
923 } else if (maxlba == 0xffffffff) {
924 lun->state = CFI_LUN_READCAPACITY_16;
925 cfi_lun_probe(lun, /*have_lock*/ 0);
927 lun->state = CFI_LUN_READY;
929 struct scsi_read_capacity_data_long *rdcap_long;
932 scsi_read_capacity_data_long *)
933 io->scsiio.ext_data_ptr;
934 maxlba = scsi_8btou64(rdcap_long->addr);
935 blocksize = scsi_4btoul(rdcap_long->length);
937 if (blocksize == 0) {
938 printf("%s: LUN %d has invalid "
939 "blocksize 0, probe aborted\n",
940 __func__, lun->lun_id);
942 lun->state = CFI_LUN_READY;
946 if (lun->state == CFI_LUN_READY) {
949 lun->num_blocks = maxlba + 1;
950 lun->blocksize = blocksize;
953 * If this is true, the blocksize is a power of 2.
954 * We already checked for 0 above.
956 if (((blocksize - 1) & blocksize) == 0) {
959 for (i = 0; i < 32; i++) {
960 if ((blocksize & (1 << i)) != 0) {
961 lun->blocksize_powerof2 = i;
966 ctl_scsi_path_string(io, path_str,sizeof(path_str));
967 printf("%s", path_str);
968 scsi_print_inquiry(&lun->inq_data);
969 printf("%s %ju blocks, blocksize %d\n", path_str,
970 (uintmax_t)maxlba + 1, blocksize);
972 mtx_lock(&lun->softc->lock);
973 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
974 mtx_unlock(&lun->softc->lock);
/* Free the READ CAPACITY data buffer allocated in cfi_lun_probe. */
975 free(io->scsiio.ext_data_ptr, M_CTL_CFI);
/* Unexpected state: just unlink the I/O and bail. */
981 mtx_lock(&lun->softc->lock);
982 /* How did we get here?? */
983 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
984 mtx_unlock(&lun->softc->lock);
/*
 * Issue the next probe command for a LUN according to its state: INQUIRY,
 * READ CAPACITY, or READ CAPACITY(16).  'have_lock' tells us whether the
 * caller already holds softc->lock.  Completion goes to cfi_lun_probe_done.
 * Probing is skipped entirely while the port is offline.
 */
991 cfi_lun_probe(struct cfi_lun *lun, int have_lock)
995 mtx_lock(&lun->softc->lock);
996 if ((lun->softc->flags & CFI_ONLINE) == 0) {
998 mtx_unlock(&lun->softc->lock);
1002 mtx_unlock(&lun->softc->lock);
1004 switch (lun->state) {
1005 case CFI_LUN_INQUIRY: {
1006 struct cfi_lun_io *lun_io;
1009 io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1011 printf("%s: unable to alloc ctl_io for target %ju "
1012 "lun %d probe\n", __func__,
1013 (uintmax_t)lun->target_id.id, lun->lun_id);
/* INQUIRY writes straight into the lun's embedded inq_data buffer. */
1016 ctl_scsi_inquiry(io,
1017 /*data_ptr*/(uint8_t *)&lun->inq_data,
1018 /*data_len*/ sizeof(lun->inq_data),
1021 /*tag_type*/ CTL_TAG_SIMPLE,
1027 /*policy*/ CFI_ERR_SOFT,
1029 /*orig_lun_io*/ NULL,
1031 cfi_lun_probe_done);
1033 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1036 mtx_lock(&lun->softc->lock);
1037 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1039 mtx_unlock(&lun->softc->lock);
/* On queue failure, back out the list insertion (and free, presumably). */
1041 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1042 printf("%s: error returned from ctl_queue()!\n",
1044 STAILQ_REMOVE(&lun->io_list, lun_io,
1050 case CFI_LUN_READCAPACITY:
1051 case CFI_LUN_READCAPACITY_16: {
1052 struct cfi_lun_io *lun_io;
1056 io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1058 printf("%s: unable to alloc ctl_io for target %ju "
1059 "lun %d probe\n", __func__,
1060 (uintmax_t)lun->target_id.id, lun->lun_id);
/*
 * The long-form buffer is used for both the 10- and 16-byte variants;
 * it is freed in cfi_lun_probe_done (or on the error path below).
 */
1064 dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
1065 M_CTL_CFI, M_NOWAIT);
1066 if (dataptr == NULL) {
1067 printf("%s: unable to allocate SCSI read capacity "
1068 "buffer for target %ju lun %d\n", __func__,
1069 (uintmax_t)lun->target_id.id, lun->lun_id);
1072 if (lun->state == CFI_LUN_READCAPACITY) {
1073 ctl_scsi_read_capacity(io,
1074 /*data_ptr*/ dataptr,
1076 sizeof(struct scsi_read_capacity_data_long),
1080 /*tag_type*/ CTL_TAG_SIMPLE,
1083 ctl_scsi_read_capacity_16(io,
1084 /*data_ptr*/ dataptr,
1086 sizeof(struct scsi_read_capacity_data_long),
1090 /*tag_type*/ CTL_TAG_SIMPLE,
1096 /*policy*/ CFI_ERR_SOFT,
1098 /*orig_lun_io*/ NULL,
1099 /*done_function*/ cfi_lun_probe_done);
1101 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1104 mtx_lock(&lun->softc->lock);
1105 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1107 mtx_unlock(&lun->softc->lock);
1109 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1110 printf("%s: error returned from ctl_queue()!\n",
1112 STAILQ_REMOVE(&lun->io_list, lun_io,
1114 free(dataptr, M_CTL_CFI);
1121 /* Why were we called? */
/*
 * Finish a metatask: unlink it from the softc's metatask list and invoke
 * the caller's callback.  The caller owns the metatask storage and must
 * release it via cfi_free_metatask once it has consumed the status.
 */
1127 cfi_metatask_done(struct cfi_softc *softc, struct cfi_metatask *metatask)
1129 mtx_lock(&softc->lock);
1130 STAILQ_REMOVE(&softc->metatask_list, metatask, cfi_metatask, links);
1131 mtx_unlock(&softc->lock);
1134 * Return status to the caller. Caller allocated storage, and is
1135 * responsible for calling cfi_free_metatask to release it once
1136 * they've seen the status.
1138 metatask->callback(metatask->callback_arg, metatask);
/*
 * Translate the completion status of a BBR (bad block replacement) read I/O
 * into the metatask's (status, bbrread.status) pair.  Only meaningful for
 * CFI_TASK_BBRREAD metatasks; anything else returns early.  The raw SCSI
 * status and sense data are also copied into the metatask for the caller.
 */
1142 cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask, union ctl_io *io)
1144 int error_code, sense_key, asc, ascq;
1146 if (metatask->tasktype != CFI_TASK_BBRREAD)
1149 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
1150 metatask->status = CFI_MT_SUCCESS;
1151 metatask->taskinfo.bbrread.status = CFI_BBR_SUCCESS;
/* Anything that is not a SCSI-level error is a generic failure. */
1155 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) {
1156 metatask->status = CFI_MT_ERROR;
1157 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
/* Preserve raw status + (truncated) sense data for the caller to inspect. */
1161 metatask->taskinfo.bbrread.scsi_status = io->scsiio.scsi_status;
1162 memcpy(&metatask->taskinfo.bbrread.sense_data, &io->scsiio.sense_data,
1163 ctl_min(sizeof(metatask->taskinfo.bbrread.sense_data),
1164 sizeof(io->scsiio.sense_data)));
1166 if (io->scsiio.scsi_status == SCSI_STATUS_RESERV_CONFLICT) {
1167 metatask->status = CFI_MT_ERROR;
1168 metatask->taskinfo.bbrread.status = CFI_BBR_RESERV_CONFLICT;
1172 if (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) {
1173 metatask->status = CFI_MT_ERROR;
1174 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1178 scsi_extract_sense_len(&io->scsiio.sense_data,
1179 io->scsiio.sense_len,
1186 switch (error_code) {
1187 case SSD_DEFERRED_ERROR:
1188 case SSD_DESC_DEFERRED_ERROR:
1189 metatask->status = CFI_MT_ERROR;
1190 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1192 case SSD_CURRENT_ERROR:
1193 case SSD_DESC_CURRENT_ERROR:
1195 struct scsi_sense_data *sense;
1197 sense = &io->scsiio.sense_data;
/* 04/02: LUN stopped; 04/03: LUN offline; 44/00: internal target failure. */
1199 if ((asc == 0x04) && (ascq == 0x02)) {
1200 metatask->status = CFI_MT_ERROR;
1201 metatask->taskinfo.bbrread.status = CFI_BBR_LUN_STOPPED;
1202 } else if ((asc == 0x04) && (ascq == 0x03)) {
1203 metatask->status = CFI_MT_ERROR;
1204 metatask->taskinfo.bbrread.status =
1205 CFI_BBR_LUN_OFFLINE_CTL;
1206 } else if ((asc == 0x44) && (ascq == 0x00)) {
/*
 * Vendor-specific decode (apparently gated by NEEDTOPORT — see the
 * #endif below): the sense-key-specific bytes carry a RAIDCORE
 * component/status code distinguishing "device offline".
 */
1208 if (sense->sense_key_spec[0] & SSD_SCS_VALID) {
1209 uint16_t retry_count;
1211 retry_count = sense->sense_key_spec[1] << 8 |
1212 sense->sense_key_spec[2];
1213 if (((retry_count & 0xf000) == CSC_RAIDCORE)
1214 && ((retry_count & 0x0f00) == CSC_SHELF_SW)
1215 && ((retry_count & 0xff) ==
1216 RC_STS_DEVICE_OFFLINE)) {
1217 metatask->status = CFI_MT_ERROR;
1218 metatask->taskinfo.bbrread.status =
1219 CFI_BBR_LUN_OFFLINE_RC;
1221 metatask->status = CFI_MT_ERROR;
1222 metatask->taskinfo.bbrread.status =
1226 #endif /* NEEDTOPORT */
1227 metatask->status = CFI_MT_ERROR;
1228 metatask->taskinfo.bbrread.status =
1234 metatask->status = CFI_MT_ERROR;
1235 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
/*
 * Per-I/O completion for metatask-driven commands.  For startup/shutdown
 * metatasks, tally complete/failed LUN counts under softc->lock and finish
 * the metatask once every LUN has reported; for BBR reads, translate the
 * SCSI result and finish immediately.  In all paths the lun_io is unlinked
 * from its LUN's io_list.
 */
1243 cfi_metatask_io_done(union ctl_io *io)
1245 struct cfi_lun_io *lun_io;
1246 struct cfi_metatask *metatask;
1247 struct cfi_softc *softc;
1248 struct cfi_lun *lun;
1250 lun_io = (struct cfi_lun_io *)
1251 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1256 metatask = lun_io->metatask;
1258 switch (metatask->tasktype) {
1259 case CFI_TASK_STARTUP:
1260 case CFI_TASK_SHUTDOWN: {
1261 int failed, done, is_start;
1265 if (metatask->tasktype == CFI_TASK_STARTUP)
/* Tally this LUN's result; 'done' is set once all LUNs have reported. */
1270 mtx_lock(&softc->lock);
1271 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
1272 metatask->taskinfo.startstop.luns_complete++;
1274 metatask->taskinfo.startstop.luns_failed++;
1277 if ((metatask->taskinfo.startstop.luns_complete +
1278 metatask->taskinfo.startstop.luns_failed) >=
1279 metatask->taskinfo.startstop.total_luns)
1282 mtx_unlock(&softc->lock);
1285 printf("%s: LUN %d %s request failed\n", __func__,
1286 lun_io->lun->lun_id, (is_start == 1) ? "start" :
1288 ctl_io_error_print(io, &lun_io->lun->inq_data);
/* All LUNs accounted for: overall status is ERROR iff any LUN failed. */
1291 if (metatask->taskinfo.startstop.luns_failed > 0)
1292 metatask->status = CFI_MT_ERROR;
1294 metatask->status = CFI_MT_SUCCESS;
1295 cfi_metatask_done(softc, metatask);
1297 mtx_lock(&softc->lock);
1298 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1299 mtx_unlock(&softc->lock);
1304 case CFI_TASK_BBRREAD: {
1306 * Translate the SCSI error into an enumeration.
1308 cfi_metatask_bbr_errorparse(metatask, io);
1310 mtx_lock(&softc->lock);
1311 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1312 mtx_unlock(&softc->lock);
1316 cfi_metatask_done(softc, metatask);
1321 * This shouldn't happen.
1323 mtx_lock(&softc->lock);
1324 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1325 mtx_unlock(&softc->lock);
/*
 * Completion handler for error-recovery I/Os (e.g. the LUN reset queued by
 * cfi_done): report the outcome, unlink the recovery I/O, then resubmit the
 * original failed command (consuming one of its retries).
 */
1333 cfi_err_recovery_done(union ctl_io *io)
1335 struct cfi_lun_io *lun_io, *orig_lun_io;
1336 struct cfi_lun *lun;
1337 union ctl_io *orig_io;
1339 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1340 orig_lun_io = lun_io->orig_lun_io;
1341 orig_io = orig_lun_io->ctl_io;
1344 if (io->io_hdr.status != CTL_SUCCESS) {
1345 printf("%s: error recovery action failed. Original "
1346 "error:\n", __func__);
1348 ctl_io_error_print(orig_lun_io->ctl_io, &lun->inq_data);
1350 printf("%s: error from error recovery action:\n", __func__);
1352 ctl_io_error_print(io, &lun->inq_data);
1354 printf("%s: trying original command again...\n", __func__);
/* The recovery I/O itself is finished — drop it from the LUN's list. */
1357 mtx_lock(&lun->softc->lock);
1358 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1359 mtx_unlock(&lun->softc->lock);
/* Resubmit the original command with a fresh status and one fewer retry. */
1362 orig_io->io_hdr.retries--;
1363 orig_io->io_hdr.status = CTL_STATUS_NONE;
1365 if (ctl_queue(orig_io) != CTL_RETVAL_COMPLETE) {
1366 printf("%s: error returned from ctl_queue()!\n", __func__);
1367 STAILQ_REMOVE(&lun->io_list, orig_lun_io,
1369 ctl_free_io(orig_io);
/*
 * Generic completion for metatask LUN I/Os: an I/O without a metatask
 * pointer is unexpected and is simply unlinked and discarded; otherwise the
 * metatask completion path takes over.
 */
1374 cfi_lun_io_done(union ctl_io *io)
1376 struct cfi_lun *lun;
1377 struct cfi_lun_io *lun_io;
1379 lun_io = (struct cfi_lun_io *)
1380 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1383 if (lun_io->metatask == NULL) {
1384 printf("%s: I/O has no metatask pointer, discarding\n",
1386 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1390 cfi_metatask_io_done(io);
/*
 * Entry point for kernel clients: execute a metatask against the internal
 * frontend. The metatask is queued on softc->metatask_list, then dispatched
 * by type. If the port is offline the task fails immediately with
 * CFI_MT_PORT_OFFLINE. Completion is always reported asynchronously via
 * cfi_metatask_done() (directly on early errors, or from cfi_lun_io_done()
 * once queued I/O finishes).
 *
 * NOTE(review): this extract has dropped lines throughout (braces,
 * continue/break/return statements, some argument lists) — the comments
 * below describe only what the visible code establishes.
 */
1394 cfi_action(struct cfi_metatask *metatask)
1396 struct cfi_softc *softc;
/* Single static softc instance for the internal frontend. */
1398 softc = &fetd_internal_softc;
1400 mtx_lock(&softc->lock);
/* Track the task for the lifetime of its outstanding I/O. */
1402 STAILQ_INSERT_TAIL(&softc->metatask_list, metatask, links);
/* Port offline: fail the task right away, outside the lock. */
1404 if ((softc->flags & CFI_ONLINE) == 0) {
1405 mtx_unlock(&softc->lock);
1406 metatask->status = CFI_MT_PORT_OFFLINE;
1407 cfi_metatask_done(softc, metatask);
1410 mtx_unlock(&softc->lock);
1412 switch (metatask->tasktype) {
/*
 * STARTUP/SHUTDOWN: issue a SCSI START STOP UNIT to every ready
 * direct-access (T_DIRECT) LUN. I/Os are pre-allocated onto a
 * temporary list first so a partial allocation can be handled
 * before anything is queued.
 */
1413 case CFI_TASK_STARTUP:
1414 case CFI_TASK_SHUTDOWN: {
1416 int da_luns, ios_allocated, do_start;
1417 struct cfi_lun *lun;
1418 STAILQ_HEAD(, ctl_io_hdr) tmp_io_list;
1422 STAILQ_INIT(&tmp_io_list);
/* do_start distinguishes START (startup) from STOP (shutdown). */
1424 if (metatask->tasktype == CFI_TASK_STARTUP)
/* Pass 1 (under the lock): count eligible LUNs and allocate one ctl_io each. */
1429 mtx_lock(&softc->lock);
1430 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1431 if (lun->state != CFI_LUN_READY)
1434 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1437 io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1440 STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
/* Allocation fell short: shrink the target LUN count to what we got. */
1445 if (ios_allocated < da_luns) {
1446 printf("%s: error allocating ctl_io for %s\n",
1447 __func__, (do_start == 1) ? "startup" :
1449 da_luns = ios_allocated;
1452 metatask->taskinfo.startstop.total_luns = da_luns;
/* Pass 2: take an io off tmp_io_list and queue START/STOP to each LUN. */
1454 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1455 struct cfi_lun_io *lun_io;
1457 if (lun->state != CFI_LUN_READY)
1460 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1463 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1467 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr, ctl_io_hdr,
/* Build the START STOP UNIT CDB; hard error policy, ordered tag. */
1470 ctl_scsi_start_stop(io,
1474 /*power_conditions*/
1477 /*ctl_tag_type*/ CTL_TAG_ORDERED,
1482 /*metatask*/ metatask,
1483 /*policy*/ CFI_ERR_HARD,
1485 /*orig_lun_io*/ NULL,
1486 /*done_function*/ cfi_lun_io_done);
/* Our bookkeeping struct lives in the io's port-private area. */
1488 lun_io = (struct cfi_lun_io *) io->io_hdr.port_priv;
1490 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
/* Queue failure: unlink and stop expecting a completion for this LUN. */
1492 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1493 printf("%s: error returned from ctl_queue()!\n",
1495 STAILQ_REMOVE(&lun->io_list, lun_io,
1498 metatask->taskinfo.startstop.total_luns--;
/*
 * Sanity check: every pre-allocated io should have been consumed in
 * pass 2 (both passes use the same eligibility test). Drain leftovers.
 */
1502 if (STAILQ_FIRST(&tmp_io_list) != NULL) {
1503 printf("%s: error: tmp_io_list != NULL\n", __func__);
1504 for (io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1506 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list)) {
1507 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr,
1512 mtx_unlock(&softc->lock);
/*
 * BBRREAD ("bad block recovery" read): validate the requested LUN
 * and length, then queue a single READ covering the given LBA range.
 */
1516 case CFI_TASK_BBRREAD: {
1518 struct cfi_lun *lun;
1519 struct cfi_lun_io *lun_io;
1520 cfi_bbrread_status status;
1522 uint32_t num_blocks;
1524 status = CFI_BBR_SUCCESS;
1526 req_lun_num = metatask->taskinfo.bbrread.lun_num;
/* Find the requested LUN; it must exist and be in the READY state. */
1528 mtx_lock(&softc->lock);
1529 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1530 if (lun->lun_id != req_lun_num)
1532 if (lun->state != CFI_LUN_READY) {
1533 status = CFI_BBR_LUN_UNCONFIG;
1540 status = CFI_BBR_NO_LUN;
/* Lookup failed: report the specific reason and finish the task. */
1542 if (status != CFI_BBR_SUCCESS) {
1543 metatask->status = CFI_MT_ERROR;
1544 metatask->taskinfo.bbrread.status = status;
1545 mtx_unlock(&softc->lock);
1546 cfi_metatask_done(softc, metatask);
1551 * Convert the number of bytes given into blocks and check
1552 * that the number of bytes is a multiple of the blocksize.
1553 * CTL will verify that the LBA is okay.
/* Power-of-two blocksize: mask/shift avoids division in the kernel. */
1555 if (lun->blocksize_powerof2 != 0) {
1556 if ((metatask->taskinfo.bbrread.len &
1557 (lun->blocksize - 1)) != 0) {
1558 metatask->status = CFI_MT_ERROR;
1559 metatask->taskinfo.bbrread.status =
1561 cfi_metatask_done(softc, metatask);
1565 num_blocks = metatask->taskinfo.bbrread.len >>
1566 lun->blocksize_powerof2;
1569 * XXX KDM this could result in floating point
1570 * division, which isn't supported in the kernel on
/* Non-power-of-two blocksize: fall back to % and / (see XXX above). */
1573 if ((metatask->taskinfo.bbrread.len %
1574 lun->blocksize) != 0) {
1575 metatask->status = CFI_MT_ERROR;
1576 metatask->taskinfo.bbrread.status =
1578 cfi_metatask_done(softc, metatask);
1583 * XXX KDM this could result in floating point
1584 * division in some cases.
1586 num_blocks = metatask->taskinfo.bbrread.len /
/* Allocate the ctl_io; failure maps to CFI_BBR_NO_MEM. */
1591 io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1593 metatask->status = CFI_MT_ERROR;
1594 metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
1595 mtx_unlock(&softc->lock);
1596 cfi_metatask_done(softc, metatask);
1601 * XXX KDM need to do a read capacity to get the blocksize
/* Build the READ CDB; soft error policy, simple tag. */
1604 ctl_scsi_read_write(io,
1606 /*data_len*/ metatask->taskinfo.bbrread.len,
1609 /*minimum_cdb_size*/ 0,
1610 /*lba*/ metatask->taskinfo.bbrread.lba,
1611 /*num_blocks*/ num_blocks,
1612 /*tag_type*/ CTL_TAG_SIMPLE,
1617 /*metatask*/ metatask,
1618 /*policy*/ CFI_ERR_SOFT,
1620 /*orig_lun_io*/ NULL,
1621 /*done_function*/ cfi_lun_io_done);
1623 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1625 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
/* Queue failure: undo bookkeeping, free the io is presumably done in a
 * dropped line — TODO confirm — and fail the task with CFI_BBR_ERROR. */
1627 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1628 printf("%s: error returned from ctl_queue()!\n",
1630 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1632 metatask->status = CFI_MT_ERROR;
1633 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1634 mtx_unlock(&softc->lock);
1635 cfi_metatask_done(softc, metatask);
1639 mtx_unlock(&softc->lock);
/* Unknown task type is a programming error, not a runtime condition. */
1643 panic("invalid metatask type %d", metatask->tasktype);
1644 break; /* NOTREACHED */
/*
 * Allocate a zeroed cfi_metatask from the UMA zone.
 *
 * can_wait selects the allocation semantics: non-zero uses M_WAITOK
 * (may sleep, effectively cannot fail), zero uses M_NOWAIT (may return
 * NULL under memory pressure). The caller owns the returned task and
 * releases it with cfi_free_metatask().
 *
 * NOTE(review): the return statements are not visible in this extract;
 * presumably NULL is returned on allocation failure and the initialized
 * metatask otherwise — confirm against the full source.
 */
1648 struct cfi_metatask *
1649 cfi_alloc_metatask(int can_wait)
1651 struct cfi_metatask *metatask;
1652 struct cfi_softc *softc;
1654 softc = &fetd_internal_softc;
/* M_ZERO means every field starts cleared; only status is set explicitly. */
1656 metatask = uma_zalloc(cfi_metatask_zone,
1657 (can_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
1658 if (metatask == NULL)
1661 metatask->status = CFI_MT_NONE;
/*
 * Return a metatask obtained from cfi_alloc_metatask() to its UMA zone.
 * The caller must ensure no I/O still references the task.
 */
1667 cfi_free_metatask(struct cfi_metatask *metatask)
1670 uma_zfree(cfi_metatask_zone, metatask);