2 * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.c#5 $
33 * CTL kernel internal frontend target driver. This allows kernel-level
34 * clients to send commands into CTL.
36 * This has elements of a FETD (e.g. it has to set tag numbers, initiator,
37 * port, target, and LUN) and elements of an initiator (LUN discovery and
38 * probing, error recovery, command initiation). Even though this has some
39 * initiator type elements, this is not intended to be a full fledged
40 * initiator layer. It is only intended to send a limited number of
41 * commands to a well known target layer.
43 * To be able to fulfill the role of a full initiator layer, it would need
44 * a whole lot more functionality.
46 * Author: Ken Merry <ken@FreeBSD.org>
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/kernel.h>
56 #include <sys/types.h>
57 #include <sys/malloc.h>
58 #include <sys/module.h>
60 #include <sys/mutex.h>
61 #include <sys/condvar.h>
62 #include <sys/queue.h>
64 #include <sys/sysctl.h>
66 #include <cam/scsi/scsi_all.h>
67 #include <cam/scsi/scsi_da.h>
68 #include <cam/ctl/ctl_io.h>
69 #include <cam/ctl/ctl.h>
70 #include <cam/ctl/ctl_frontend.h>
71 #include <cam/ctl/ctl_frontend_internal.h>
72 #include <cam/ctl/ctl_backend.h>
73 #include <cam/ctl/ctl_ioctl.h>
74 #include <cam/ctl/ctl_util.h>
75 #include <cam/ctl/ctl_ha.h>
76 #include <cam/ctl/ctl_private.h>
77 #include <cam/ctl/ctl_debug.h>
78 #include <cam/ctl/ctl_scsi_all.h>
79 #include <cam/ctl/ctl_error.h>
83 * - overall metatask, different potential metatask types (e.g. forced
84 * shutdown, gentle shutdown)
85 * - forced shutdown metatask:
86 * - states: report luns, pending, done?
87 * - list of luns pending, with the relevant I/O for that lun attached.
88 * This would allow moving ahead on LUNs with no errors, and going
89 * into error recovery on LUNs with problems. Per-LUN states might
90 * include inquiry, stop/offline, done.
92 * Use LUN enable for LUN list instead of getting it manually? We'd still
93 * need inquiry data for each LUN.
95 * How to handle processor LUN w.r.t. found/stopped counts?
/*
 * Core data structures for the CTL internal ("kernel") frontend.
 *
 * NOTE(review): this listing is an excerpt -- struct/enum bodies below are
 * partially elided, so comments are limited to the fields actually visible.
 */
104 struct cfi_task_startstop {
110 /* XXX KDM add more fields here */
114 struct cfi_task_startstop startstop;
/*
 * A metatask aggregates the per-LUN I/Os issued for one high-level request
 * (startup/shutdown/BBR read); it lives on the softc's metatask_list.
 */
117 struct cfi_metatask {
118 cfi_tasktype tasktype;
119 cfi_mt_status status;
120 union cfi_taskinfo taskinfo;
122 STAILQ_ENTRY(cfi_metatask) links;
/*
 * Error-recovery actions.  The low byte (CFI_ERR_MASK) selects the action;
 * CFI_ERR_NO_DECREMENT is an OR-able flag meaning "do not consume a retry"
 * (used for unit attentions and deferred errors -- see cfi_checkcond_parse()).
 */
127 CFI_ERR_RETRY = 0x000,
128 CFI_ERR_FAIL = 0x001,
129 CFI_ERR_LUN_RESET = 0x002,
130 CFI_ERR_MASK = 0x0ff,
131 CFI_ERR_NO_DECREMENT = 0x100
/* Probe-state values for a LUN (inquiry -> read capacity [-> 16] -> ready). */
141 CFI_LUN_READCAPACITY,
142 CFI_LUN_READCAPACITY_16,
/*
 * Per-LUN state: cached inquiry data, blocksize shift (0 when the blocksize
 * is not a power of 2 -- see cfi_action()'s BBR path), tag counter, and the
 * list of I/Os outstanding on this LUN.
 */
148 struct scsi_inquiry_data inq_data;
151 int blocksize_powerof2;
152 uint32_t cur_tag_num;
154 struct cfi_softc *softc;
155 STAILQ_HEAD(, cfi_lun_io) io_list;
156 STAILQ_ENTRY(cfi_lun) links;
/*
 * Per-I/O bookkeeping, stored in each ctl_io's port-private area
 * (io_hdr.port_priv); orig_lun_io links an error-recovery I/O back to the
 * command it is recovering.
 */
161 struct cfi_metatask *metatask;
162 cfi_error_policy policy;
163 void (*done_function)(union ctl_io *io);
164 union ctl_io *ctl_io;
165 struct cfi_lun_io *orig_lun_io;
166 STAILQ_ENTRY(cfi_lun_io) links;
/* Softc: the registered CTL port plus the global LUN and metatask lists. */
175 struct ctl_port port;
179 STAILQ_HEAD(, cfi_lun) lun_list;
180 STAILQ_HEAD(, cfi_metatask) metatask_list;
183 MALLOC_DEFINE(M_CTL_CFI, "ctlcfi", "CTL CFI");
/* UMA zones for the fixed-size cfi_lun and cfi_metatask allocations. */
185 static uma_zone_t cfi_lun_zone;
186 static uma_zone_t cfi_metatask_zone;
/* Single instance: this frontend registers exactly one internal port. */
188 static struct cfi_softc fetd_internal_softc;
/*
 * Forward declarations for the port callbacks and internal helpers; the
 * frontend itself is registered with CTL via CTL_FRONTEND_DECLARE() below.
 */
191 void cfi_shutdown(void) __unused;
192 static void cfi_online(void *arg);
193 static void cfi_offline(void *arg);
194 static int cfi_lun_enable(void *arg, int lun_id);
195 static int cfi_lun_disable(void *arg, int lun_id);
196 static void cfi_datamove(union ctl_io *io);
197 static cfi_error_action cfi_checkcond_parse(union ctl_io *io,
198 struct cfi_lun_io *lun_io);
199 static cfi_error_action cfi_error_parse(union ctl_io *io,
200 struct cfi_lun_io *lun_io);
201 static void cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
202 struct cfi_metatask *metatask, cfi_error_policy policy,
203 int retries, struct cfi_lun_io *orig_lun_io,
204 void (*done_function)(union ctl_io *io));
205 static void cfi_done(union ctl_io *io);
206 static void cfi_lun_probe_done(union ctl_io *io);
207 static void cfi_lun_probe(struct cfi_lun *lun, int have_lock);
208 static void cfi_metatask_done(struct cfi_softc *softc,
209 struct cfi_metatask *metatask);
210 static void cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask,
212 static void cfi_metatask_io_done(union ctl_io *io);
213 static void cfi_err_recovery_done(union ctl_io *io);
214 static void cfi_lun_io_done(union ctl_io *io);
216 static struct ctl_frontend cfi_frontend =
220 .shutdown = cfi_shutdown,
222 CTL_FRONTEND_DECLARE(ctlcfi, cfi_frontend);
/*
 * Frontend initialization (NOTE(review): the function signature and several
 * lines are elided in this excerpt).  Zeroes the softc, initializes its lock
 * and lists, fills in the ctl_port callbacks, registers the port with CTL,
 * and creates the UMA zones for LUN and metatask allocations.
 */
227 struct cfi_softc *softc;
228 struct ctl_port *port;
231 softc = &fetd_internal_softc;
/*
 * Compile/boot-time sanity check: a cfi_lun_io must fit in the ctl_io
 * port-private area, since cfi_init_io() stores it there.
 * NOTE(review): %zd is for ssize_t; sizeof() yields size_t, so %zu would
 * be the correct conversion -- harmless in practice, but worth fixing.
 */
237 if (sizeof(struct cfi_lun_io) > CTL_PORT_PRIV_SIZE) {
238 printf("%s: size of struct cfi_lun_io %zd > "
239 "CTL_PORT_PRIV_SIZE %d\n", __func__,
240 sizeof(struct cfi_lun_io),
243 memset(softc, 0, sizeof(*softc));
245 mtx_init(&softc->lock, "CTL frontend mutex", NULL, MTX_DEF);
246 STAILQ_INIT(&softc->lun_list);
247 STAILQ_INIT(&softc->metatask_list);
/* NOTE(review): sprintf into a fixed buffer; snprintf would be safer even
 * though the constant "kernel" cannot overflow fe_name today. */
248 sprintf(softc->fe_name, "kernel");
249 port->frontend = &cfi_frontend;
250 port->port_type = CTL_PORT_INTERNAL;
251 port->num_requested_ctl_io = 100;
252 port->port_name = softc->fe_name;
253 port->port_online = cfi_online;
254 port->port_offline = cfi_offline;
255 port->onoff_arg = softc;
256 port->lun_enable = cfi_lun_enable;
257 port->lun_disable = cfi_lun_disable;
258 port->targ_lun_arg = softc;
259 port->fe_datamove = cfi_datamove;
260 port->fe_done = cfi_done;
261 port->max_targets = 15;
262 port->max_target_id = 15;
/* Registration failure is only logged here -- presumably an error is
 * returned in an elided line; confirm against the full source. */
264 if (ctl_port_register(port) != 0)
266 printf("%s: internal frontend registration failed\n", __func__);
270 cfi_lun_zone = uma_zcreate("cfi_lun", sizeof(struct cfi_lun),
271 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
272 cfi_metatask_zone = uma_zcreate("cfi_metatask", sizeof(struct cfi_metatask),
273 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
/*
 * Frontend shutdown (signature elided in this excerpt): deregister the port
 * and destroy the UMA zones.  Per the XXX below, pending per-LUN I/O is not
 * drained here.
 */
281 struct cfi_softc *softc;
283 softc = &fetd_internal_softc;
286 * XXX KDM need to clear out any I/O pending on each LUN.
288 if (ctl_port_deregister(&softc->port) != 0)
289 printf("%s: ctl_frontend_deregister() failed\n", __func__);
291 uma_zdestroy(cfi_lun_zone);
292 uma_zdestroy(cfi_metatask_zone);
/*
 * Port-online callback: mark the port online and kick off a probe
 * (inquiry/read-capacity sequence) for every LUN currently on the list.
 * The softc lock is held across the walk, so cfi_lun_probe() is told it
 * already has the lock.
 */
296 cfi_online(void *arg)
298 struct cfi_softc *softc;
301 softc = (struct cfi_softc *)arg;
/* NOTE(review): CFI_ONLINE is set without holding softc->lock here while
 * cfi_action() reads it under the lock -- confirm intended in full source. */
303 softc->flags |= CFI_ONLINE;
306 * Go through and kick off the probe for each lun. Should we check
307 * the LUN flags here to determine whether or not to probe it?
309 mtx_lock(&softc->lock);
310 STAILQ_FOREACH(lun, &softc->lun_list, links)
311 cfi_lun_probe(lun, /*have_lock*/ 1);
312 mtx_unlock(&softc->lock);
/*
 * Port-offline callback: just clear the online flag; in-flight I/O is left
 * to complete on its own.
 */
316 cfi_offline(void *arg)
318 struct cfi_softc *softc;
320 softc = (struct cfi_softc *)arg;
322 softc->flags &= ~CFI_ONLINE;
/*
 * LUN-enable callback from CTL: allocate and track a cfi_lun for lun_id and
 * start probing it.  If the LUN is already on our list, this is a no-op
 * (early return in an elided line, per the comment below).
 */
326 cfi_lun_enable(void *arg, int lun_id)
328 struct cfi_softc *softc;
332 softc = (struct cfi_softc *)arg;
335 mtx_lock(&softc->lock);
336 STAILQ_FOREACH(lun, &softc->lun_list, links) {
337 if (lun->lun_id == lun_id) {
342 mtx_unlock(&softc->lock);
345 * If we already have this target/LUN, there is no reason to add
346 * it to our lists again.
/* M_NOWAIT: this can be called from a context where sleeping is not safe;
 * allocation failure is logged and (presumably) an error returned. */
351 lun = uma_zalloc(cfi_lun_zone, M_NOWAIT | M_ZERO);
353 printf("%s: unable to allocate LUN structure\n", __func__);
357 lun->lun_id = lun_id;
358 lun->cur_tag_num = 0;
/* Probing always starts with INQUIRY. */
359 lun->state = CFI_LUN_INQUIRY;
361 STAILQ_INIT(&lun->io_list);
363 mtx_lock(&softc->lock);
364 STAILQ_INSERT_TAIL(&softc->lun_list, lun, links);
365 mtx_unlock(&softc->lock);
367 cfi_lun_probe(lun, /*have_lock*/ 0);
/*
 * LUN-disable callback: find the LUN on our list, unlink it, and free it.
 * Relies on CTL's guarantee (see XXX below) that no I/O is pending on the
 * LUN by the time this notification arrives.
 */
373 cfi_lun_disable(void *arg, int lun_id)
375 struct cfi_softc *softc;
379 softc = (struct cfi_softc *)arg;
384 * XXX KDM need to do an invalidate and then a free when any
385 * pending I/O has completed. Or do we? CTL won't free a LUN
386 * while any I/O is pending. So we won't get this notification
387 * unless any I/O we have pending on a LUN has completed.
389 mtx_lock(&softc->lock);
390 STAILQ_FOREACH(lun, &softc->lun_list, links) {
391 if (lun->lun_id == lun_id) {
397 STAILQ_REMOVE(&softc->lun_list, lun, cfi_lun, links);
399 mtx_unlock(&softc->lock);
/* Not finding the LUN is unexpected but non-fatal; just log it. */
402 printf("%s: can't find lun %d\n", __func__, lun_id);
406 uma_zfree(cfi_lun_zone, lun);
/*
 * fe_datamove callback: copy data between the caller's "external" buffer
 * (single buffer or S/G list in ext_data_ptr) and CTL's kernel S/G list,
 * in the direction indicated by CTL_FLAG_DATA_MASK, then signal completion
 * via be_move_done().  A NULL ext_data_ptr bit-buckets the transfer (BBR
 * reads).  NOTE(review): excerpt -- several declarations (i, j, len_seen)
 * and closing braces are elided.
 */
412 cfi_datamove(union ctl_io *io)
414 struct ctl_sg_entry *ext_sglist, *kern_sglist;
415 struct ctl_sg_entry ext_entry, kern_entry;
416 int ext_sglen, ext_sg_entries, kern_sg_entries;
417 int ext_sg_start, ext_offset;
418 int len_to_copy, len_copied;
419 int kern_watermark, ext_watermark;
420 int ext_sglist_malloced;
421 struct ctl_scsiio *ctsio;
424 ext_sglist_malloced = 0;
429 CTL_DEBUG_PRINT(("%s\n", __func__));
434 * If this is the case, we're probably doing a BBR read and don't
435 * actually need to transfer the data. This will effectively
436 * bit-bucket the data.
438 if (ctsio->ext_data_ptr == NULL)
442 * To simplify things here, if we have a single buffer, stick it in
443 * a S/G entry and just make it a single entry S/G list.
445 if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
/* Caller supplied an S/G list: copy it into a private kernel buffer. */
448 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
450 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL_CFI,
452 ext_sglist_malloced = 1;
/*
 * NOTE(review): memcpy() returns its destination pointer, so this
 * "!= 0" test is effectively always true for a non-NULL ext_sglist;
 * this looks like a leftover from a copyin()-style call whose return
 * value was an error code.  The error branch here is almost certainly
 * dead or wrong -- verify against the full source and fix there.
 */
453 if (memcpy(ext_sglist, ctsio->ext_data_ptr, ext_sglen) != 0) {
454 ctl_set_internal_failure(ctsio,
/* Skip entries already transferred (ext_data_filled) to find where to
 * resume, and the offset within that entry. */
459 ext_sg_entries = ctsio->ext_sg_entries;
461 for (i = 0; i < ext_sg_entries; i++) {
462 if ((len_seen + ext_sglist[i].len) >=
463 ctsio->ext_data_filled) {
465 ext_offset = ctsio->ext_data_filled - len_seen;
468 len_seen += ext_sglist[i].len;
/* Single flat buffer: fake up a one-entry S/G list on the stack. */
471 ext_sglist = &ext_entry;
472 ext_sglist->addr = ctsio->ext_data_ptr;
473 ext_sglist->len = ctsio->ext_data_len;
476 ext_offset = ctsio->ext_data_filled;
479 if (ctsio->kern_sg_entries > 0) {
480 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
481 kern_sg_entries = ctsio->kern_sg_entries;
483 kern_sglist = &kern_entry;
484 kern_sglist->addr = ctsio->kern_data_ptr;
485 kern_sglist->len = ctsio->kern_data_len;
/* Walk both lists in lockstep; the watermarks track how far into the
 * current entry of each list we have copied. */
491 ext_watermark = ext_offset;
493 for (i = ext_sg_start, j = 0;
494 i < ext_sg_entries && j < kern_sg_entries;) {
495 uint8_t *ext_ptr, *kern_ptr;
497 len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
498 kern_sglist[j].len - kern_watermark);
500 ext_ptr = (uint8_t *)ext_sglist[i].addr;
501 ext_ptr = ext_ptr + ext_watermark;
502 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
/* Bus addresses are not supported by this frontend. */
506 panic("need to implement bus address support");
508 kern_ptr = bus_to_virt(kern_sglist[j].addr);
511 kern_ptr = (uint8_t *)kern_sglist[j].addr;
512 kern_ptr = kern_ptr + kern_watermark;
514 kern_watermark += len_to_copy;
515 ext_watermark += len_to_copy;
/* Direction: DATA_IN copies kernel -> external, otherwise external ->
 * kernel. */
517 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
519 CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
520 __func__, len_to_copy));
521 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
523 memcpy(ext_ptr, kern_ptr, len_to_copy);
525 CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
526 __func__, len_to_copy));
527 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
529 memcpy(kern_ptr, ext_ptr, len_to_copy);
532 len_copied += len_to_copy;
/* Advance to the next entry in whichever list is exhausted. */
534 if (ext_sglist[i].len == ext_watermark) {
539 if (kern_sglist[j].len == kern_watermark) {
545 ctsio->ext_data_filled += len_copied;
547 CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
548 __func__, ext_sg_entries, kern_sg_entries));
549 CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
550 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
553 /* XXX KDM set residual?? */
556 if (ext_sglist_malloced != 0)
557 free(ext_sglist, M_CTL_CFI);
/* Tell CTL the move is complete so command processing can continue. */
559 io->scsiio.be_move_done(io);
565 * For any sort of check condition, busy, etc., we just retry. We do not
566 * decrement the retry count for unit attention type errors. These are
567 * normal, and we want to save the retry count for "real" errors. Otherwise,
568 * we could end up with situations where a command will succeed in some
569 * situations and fail in others, depending on whether a unit attention is
570 * pending. Also, some of our error recovery actions, most notably the
571 * LUN reset action, will cause a unit attention.
573 * We can add more detail here later if necessary.
575 static cfi_error_action
576 cfi_checkcond_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
578 cfi_error_action error_action;
579 int error_code, sense_key, asc, ascq;
582 * Default to retrying the command.
584 error_action = CFI_ERR_RETRY;
/* Decode error code / sense key / ASC / ASCQ from the returned sense data
 * (handles both fixed and descriptor format). */
586 scsi_extract_sense_len(&io->scsiio.sense_data,
587 io->scsiio.sense_len,
594 switch (error_code) {
595 case SSD_DEFERRED_ERROR:
596 case SSD_DESC_DEFERRED_ERROR:
/* Deferred errors relate to a previous command; retry this one without
 * charging it a retry. */
597 error_action |= CFI_ERR_NO_DECREMENT;
599 case SSD_CURRENT_ERROR:
600 case SSD_DESC_CURRENT_ERROR:
603 case SSD_KEY_UNIT_ATTENTION:
/* Unit attentions are expected (e.g. after our own LUN reset); do not
 * consume a retry -- see the block comment above. */
604 error_action |= CFI_ERR_NO_DECREMENT;
606 case SSD_KEY_HARDWARE_ERROR:
608 * This is our generic "something bad happened"
609 * error code. It often isn't recoverable.
611 if ((asc == 0x44) && (ascq == 0x00))
612 error_action = CFI_ERR_FAIL;
614 case SSD_KEY_NOT_READY:
616 * If the LUN is powered down, there likely isn't
617 * much point in retrying right now.
619 if ((asc == 0x04) && (ascq == 0x02))
620 error_action = CFI_ERR_FAIL;
622 * If the LUN is offline, there probably isn't much
623 * point in retrying, either.
625 if ((asc == 0x04) && (ascq == 0x03))
626 error_action = CFI_ERR_FAIL;
632 return (error_action);
/*
 * Top-level error classifier: map a failed ctl_io to a recovery action.
 * SCSI errors are broken down by scsi_status (reservation conflict escalates
 * to LUN reset under the hard error policy, check conditions are decoded by
 * cfi_checkcond_parse()); everything else defaults to retry.
 */
635 static cfi_error_action
636 cfi_error_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
638 cfi_error_action error_action;
640 error_action = CFI_ERR_RETRY;
642 switch (io->io_hdr.io_type) {
644 switch (io->io_hdr.status & CTL_STATUS_MASK) {
646 switch (io->scsiio.scsi_status) {
647 case SCSI_STATUS_RESERV_CONFLICT:
649 * For a reservation conflict, we'll usually
650 * want the hard error recovery policy, so
651 * we'll reset the LUN.
653 if (lun_io->policy == CFI_ERR_HARD)
660 case SCSI_STATUS_CHECK_COND:
662 error_action = cfi_checkcond_parse(io, lun_io);
667 error_action = CFI_ERR_RETRY;
673 * In theory task management commands shouldn't fail...
675 error_action = CFI_ERR_RETRY;
/* Unknown io_type is a programming error: log and panic. */
678 printf("%s: invalid ctl_io type %d\n", __func__,
680 panic("%s: invalid ctl_io type %d\n", __func__,
685 return (error_action);
/*
 * Common setup for every I/O this frontend issues: fill in the nexus
 * (fixed initiator id 7 on our port, target 0, the LUN's id), the retry
 * budget, and the cfi_lun_io bookkeeping kept in the io's port-private
 * area.  SCSI I/Os also get the next tag number; task management I/Os
 * leave tagging to the caller (see comment below).
 */
689 cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
690 struct cfi_metatask *metatask, cfi_error_policy policy, int retries,
691 struct cfi_lun_io *orig_lun_io,
692 void (*done_function)(union ctl_io *io))
694 struct cfi_lun_io *lun_io;
696 io->io_hdr.nexus.initid.id = 7;
697 io->io_hdr.nexus.targ_port = lun->softc->port.targ_port;
698 io->io_hdr.nexus.targ_target.id = 0;
699 io->io_hdr.nexus.targ_lun = lun->lun_id;
700 io->io_hdr.retries = retries;
/* The cfi_lun_io lives inside the io itself (port_priv); stash a pointer
 * to it in the frontend-private slot for easy retrieval in cfi_done(). */
701 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
702 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = lun_io;
704 lun_io->metatask = metatask;
706 lun_io->policy = policy;
707 lun_io->orig_lun_io = orig_lun_io;
708 lun_io->done_function = done_function;
710 * We only set the tag number for SCSI I/Os. For task management
711 * commands, the tag number is only really needed for aborts, so
712 * the caller can set it if necessary.
714 switch (io->io_hdr.io_type) {
716 io->scsiio.tag_num = lun->cur_tag_num++;
/*
 * fe_done callback: completion handler for every I/O we issued.  On error
 * with retries remaining, consult cfi_error_parse() and either fail, issue
 * a LUN reset (queuing a recovery I/O that will re-drive this one from
 * cfi_err_recovery_done()), or clear status/abort flags and requeue the
 * same io.  Otherwise hand off to the per-I/O done_function.
 * NOTE(review): excerpt -- lun/softc assignments and some braces elided.
 */
725 cfi_done(union ctl_io *io)
727 struct cfi_lun_io *lun_io;
728 struct cfi_softc *softc;
731 lun_io = (struct cfi_lun_io *)
732 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
738 * Very minimal retry logic. We basically retry if we got an error
739 * back, and the retry count is greater than 0. If we ever want
740 * more sophisticated initiator type behavior, the CAM error
741 * recovery code in ../common might be helpful.
743 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
744 && (io->io_hdr.retries > 0)) {
745 ctl_io_status old_status;
746 cfi_error_action error_action;
748 error_action = cfi_error_parse(io, lun_io);
750 switch (error_action & CFI_ERR_MASK) {
753 break; /* NOTREACHED */
754 case CFI_ERR_LUN_RESET: {
755 union ctl_io *new_io;
756 struct cfi_lun_io *new_lun_io;
/* Allocate a task-management io for the reset; presumably the NULL
 * case is handled in an elided line. */
758 new_io = ctl_alloc_io(softc->port.ctl_pool_ref);
761 new_io->io_hdr.io_type = CTL_IO_TASK;
762 new_io->taskio.task_action = CTL_TASK_LUN_RESET;
/* The recovery io links back to this one via orig_lun_io so
 * cfi_err_recovery_done() can retry the original command. */
767 /*policy*/ CFI_ERR_SOFT,
769 /*orig_lun_io*/lun_io,
770 /*done_function*/ cfi_err_recovery_done);
773 new_lun_io = (struct cfi_lun_io *)
774 new_io->io_hdr.port_priv;
776 mtx_lock(&lun->softc->lock);
777 STAILQ_INSERT_TAIL(&lun->io_list, new_lun_io, links);
778 mtx_unlock(&lun->softc->lock);
/* Unit attentions / deferred errors don't consume a retry. */
785 if ((error_action & CFI_ERR_NO_DECREMENT) == 0)
786 io->io_hdr.retries--;
/* Reset status and completion/abort flags so CTL treats the io as a
 * fresh submission when requeued. */
790 old_status = io->io_hdr.status;
791 io->io_hdr.status = CTL_STATUS_NONE;
793 io->io_hdr.flags &= ~CTL_FLAG_ALREADY_DONE;
795 io->io_hdr.flags &= ~CTL_FLAG_ABORT;
796 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
798 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
799 printf("%s: error returned from ctl_queue()!\n",
/* Requeue failed: restore the original error status and fall
 * through to normal completion. */
801 io->io_hdr.status = old_status;
806 lun_io->done_function(io);
/*
 * Completion handler for probe I/Os, driving the LUN state machine:
 * INQUIRY -> (direct-access only) READ CAPACITY -> (if maxlba wraps)
 * READ CAPACITY(16) -> READY.  On reaching READY it records num_blocks,
 * blocksize, and the power-of-two shift, and prints the device banner.
 * NOTE(review): excerpt -- path_str/maxlba/blocksize declarations, some
 * braces, io free calls, and the lun assignment are elided.
 */
810 cfi_lun_probe_done(union ctl_io *io)
813 struct cfi_lun_io *lun_io;
815 lun_io = (struct cfi_lun_io *)
816 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
819 switch (lun->state) {
820 case CFI_LUN_INQUIRY: {
821 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
822 /* print out something here?? */
823 printf("%s: LUN %d probe failed because inquiry "
824 "failed\n", __func__, lun->lun_id);
825 ctl_io_error_print(io, NULL);
/* Non direct-access devices (e.g. processor LUNs) skip the read
 * capacity steps and go straight to READY. */
828 if (SID_TYPE(&lun->inq_data) != T_DIRECT) {
831 lun->state = CFI_LUN_READY;
832 ctl_scsi_path_string(io, path_str,
834 printf("%s", path_str);
835 scsi_print_inquiry(&lun->inq_data);
837 lun->state = CFI_LUN_READCAPACITY;
838 cfi_lun_probe(lun, /*have_lock*/ 0);
/* Probe io is finished; unlink its tracking entry. */
841 mtx_lock(&lun->softc->lock);
842 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
843 mtx_unlock(&lun->softc->lock);
847 case CFI_LUN_READCAPACITY:
848 case CFI_LUN_READCAPACITY_16: {
855 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
856 printf("%s: LUN %d probe failed because READ CAPACITY "
857 "failed\n", __func__, lun->lun_id);
858 ctl_io_error_print(io, NULL);
861 if (lun->state == CFI_LUN_READCAPACITY) {
862 struct scsi_read_capacity_data *rdcap;
864 rdcap = (struct scsi_read_capacity_data *)
865 io->scsiio.ext_data_ptr;
867 maxlba = scsi_4btoul(rdcap->addr);
868 blocksize = scsi_4btoul(rdcap->length);
869 if (blocksize == 0) {
870 printf("%s: LUN %d has invalid "
871 "blocksize 0, probe aborted\n",
872 __func__, lun->lun_id);
/* 0xffffffff means the capacity doesn't fit in 10-byte READ
 * CAPACITY; re-probe with READ CAPACITY(16). */
873 } else if (maxlba == 0xffffffff) {
874 lun->state = CFI_LUN_READCAPACITY_16;
875 cfi_lun_probe(lun, /*have_lock*/ 0);
877 lun->state = CFI_LUN_READY;
879 struct scsi_read_capacity_data_long *rdcap_long;
882 scsi_read_capacity_data_long *)
883 io->scsiio.ext_data_ptr;
884 maxlba = scsi_8btou64(rdcap_long->addr);
885 blocksize = scsi_4btoul(rdcap_long->length);
887 if (blocksize == 0) {
888 printf("%s: LUN %d has invalid "
889 "blocksize 0, probe aborted\n",
890 __func__, lun->lun_id);
892 lun->state = CFI_LUN_READY;
896 if (lun->state == CFI_LUN_READY) {
899 lun->num_blocks = maxlba + 1;
900 lun->blocksize = blocksize;
903 * If this is true, the blocksize is a power of 2.
904 * We already checked for 0 above.
906 if (((blocksize - 1) & blocksize) == 0) {
/* Find the set bit to derive the shift used for fast
 * byte-to-block conversion in cfi_action(). */
909 for (i = 0; i < 32; i++) {
910 if ((blocksize & (1 << i)) != 0) {
911 lun->blocksize_powerof2 = i;
916 ctl_scsi_path_string(io, path_str,sizeof(path_str));
917 printf("%s", path_str);
918 scsi_print_inquiry(&lun->inq_data);
919 printf("%s %ju blocks, blocksize %d\n", path_str,
920 (uintmax_t)maxlba + 1, blocksize);
922 mtx_lock(&lun->softc->lock);
923 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
924 mtx_unlock(&lun->softc->lock);
/* The read-capacity buffer was malloc'd in cfi_lun_probe(). */
925 free(io->scsiio.ext_data_ptr, M_CTL_CFI);
931 mtx_lock(&lun->softc->lock);
932 /* How did we get here?? */
933 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
934 mtx_unlock(&lun->softc->lock);
/*
 * Issue the next probe command for a LUN according to lun->state: INQUIRY
 * into the embedded inq_data, or READ CAPACITY / READ CAPACITY(16) into a
 * freshly malloc'd buffer (freed in cfi_lun_probe_done()).  have_lock says
 * whether the caller already holds softc->lock, so the lock/unlock calls
 * around the flag check and list insertion are conditional (the conditions
 * themselves are elided in this excerpt).  Does nothing if the port is not
 * online.
 */
941 cfi_lun_probe(struct cfi_lun *lun, int have_lock)
945 mtx_lock(&lun->softc->lock);
946 if ((lun->softc->flags & CFI_ONLINE) == 0) {
948 mtx_unlock(&lun->softc->lock);
952 mtx_unlock(&lun->softc->lock);
954 switch (lun->state) {
955 case CFI_LUN_INQUIRY: {
956 struct cfi_lun_io *lun_io;
959 io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
/* INQUIRY lands directly in the LUN's cached inq_data. */
961 /*data_ptr*/(uint8_t *)&lun->inq_data,
962 /*data_len*/ sizeof(lun->inq_data),
965 /*tag_type*/ CTL_TAG_SIMPLE,
971 /*policy*/ CFI_ERR_SOFT,
973 /*orig_lun_io*/ NULL,
977 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
980 mtx_lock(&lun->softc->lock);
981 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
983 mtx_unlock(&lun->softc->lock);
985 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
986 printf("%s: error returned from ctl_queue()!\n",
/* Failed to queue: unlink the tracking entry (io free is in an
 * elided line). */
988 STAILQ_REMOVE(&lun->io_list, lun_io,
994 case CFI_LUN_READCAPACITY:
995 case CFI_LUN_READCAPACITY_16: {
996 struct cfi_lun_io *lun_io;
1000 io = ctl_alloc_io(lun->softc->port.ctl_pool_ref);
/* The long (16-byte) capacity structure is large enough for either
 * command's response, so one buffer size serves both states. */
1002 dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
1003 M_CTL_CFI, M_NOWAIT);
1004 if (dataptr == NULL) {
1005 printf("%s: unable to allocate SCSI read capacity "
1006 "buffer for lun %d\n", __func__, lun->lun_id);
1009 if (lun->state == CFI_LUN_READCAPACITY) {
1010 ctl_scsi_read_capacity(io,
1011 /*data_ptr*/ dataptr,
1013 sizeof(struct scsi_read_capacity_data_long),
1017 /*tag_type*/ CTL_TAG_SIMPLE,
1020 ctl_scsi_read_capacity_16(io,
1021 /*data_ptr*/ dataptr,
1023 sizeof(struct scsi_read_capacity_data_long),
1027 /*tag_type*/ CTL_TAG_SIMPLE,
1033 /*policy*/ CFI_ERR_SOFT,
1035 /*orig_lun_io*/ NULL,
1036 /*done_function*/ cfi_lun_probe_done);
1038 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1041 mtx_lock(&lun->softc->lock);
1042 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1044 mtx_unlock(&lun->softc->lock);
1046 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1047 printf("%s: error returned from ctl_queue()!\n",
1049 STAILQ_REMOVE(&lun->io_list, lun_io,
1051 free(dataptr, M_CTL_CFI);
1058 /* Why were we called? */
/*
 * Finish a metatask: unlink it from the softc list and invoke the caller's
 * callback with the final status.  The metatask storage itself is the
 * caller's to release via cfi_free_metatask().
 */
1064 cfi_metatask_done(struct cfi_softc *softc, struct cfi_metatask *metatask)
1066 mtx_lock(&softc->lock);
1067 STAILQ_REMOVE(&softc->metatask_list, metatask, cfi_metatask, links);
1068 mtx_unlock(&softc->lock);
1071 * Return status to the caller. Caller allocated storage, and is
1072 * responsible for calling cfi_free_metatask to release it once
1073 * they've seen the status.
1075 metatask->callback(metatask->callback_arg, metatask);
/*
 * Translate the completion status of a BBR-read I/O into the metatask's
 * cfi_bbrread_status enumeration: success, generic/SCSI error, reservation
 * conflict, or -- for specific NOT READY / HARDWARE ERROR sense codes --
 * LUN stopped/offline.  Copies scsi_status and (bounded) sense data into
 * the metatask for the caller.  No-op for non-BBRREAD metatasks.
 * NOTE(review): excerpt -- early returns, break statements, and the
 * NEEDTOPORT-era status assignments are partially elided.
 */
1079 cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask, union ctl_io *io)
1081 int error_code, sense_key, asc, ascq;
1083 if (metatask->tasktype != CFI_TASK_BBRREAD)
1086 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
1087 metatask->status = CFI_MT_SUCCESS;
1088 metatask->taskinfo.bbrread.status = CFI_BBR_SUCCESS;
/* Anything that is not a SCSI-level error (e.g. internal CTL failure)
 * is reported as a generic error. */
1092 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) {
1093 metatask->status = CFI_MT_ERROR;
1094 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
/* Preserve the raw SCSI status and as much sense data as fits for the
 * caller's own inspection. */
1098 metatask->taskinfo.bbrread.scsi_status = io->scsiio.scsi_status;
1099 memcpy(&metatask->taskinfo.bbrread.sense_data, &io->scsiio.sense_data,
1100 MIN(sizeof(metatask->taskinfo.bbrread.sense_data),
1101 sizeof(io->scsiio.sense_data)));
1103 if (io->scsiio.scsi_status == SCSI_STATUS_RESERV_CONFLICT) {
1104 metatask->status = CFI_MT_ERROR;
1105 metatask->taskinfo.bbrread.status = CFI_BBR_RESERV_CONFLICT;
1109 if (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) {
1110 metatask->status = CFI_MT_ERROR;
1111 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1115 scsi_extract_sense_len(&io->scsiio.sense_data,
1116 io->scsiio.sense_len,
1123 switch (error_code) {
1124 case SSD_DEFERRED_ERROR:
1125 case SSD_DESC_DEFERRED_ERROR:
1126 metatask->status = CFI_MT_ERROR;
1127 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1129 case SSD_CURRENT_ERROR:
1130 case SSD_DESC_CURRENT_ERROR:
1132 struct scsi_sense_data *sense;
1134 sense = &io->scsiio.sense_data;
/* ASC/ASCQ 04/02: LUN not ready, initializing command required ->
 * stopped; 04/03: manual intervention required -> offline. */
1136 if ((asc == 0x04) && (ascq == 0x02)) {
1137 metatask->status = CFI_MT_ERROR;
1138 metatask->taskinfo.bbrread.status = CFI_BBR_LUN_STOPPED;
1139 } else if ((asc == 0x04) && (ascq == 0x03)) {
1140 metatask->status = CFI_MT_ERROR;
1141 metatask->taskinfo.bbrread.status =
1142 CFI_BBR_LUN_OFFLINE_CTL;
1143 } else if ((asc == 0x44) && (ascq == 0x00)) {
/* Vendor-specific decode: the sense-key-specific bytes carry a
 * RAIDCore status word identifying a device-offline condition. */
1145 if (sense->sense_key_spec[0] & SSD_SCS_VALID) {
1146 uint16_t retry_count;
1148 retry_count = sense->sense_key_spec[1] << 8 |
1149 sense->sense_key_spec[2];
1150 if (((retry_count & 0xf000) == CSC_RAIDCORE)
1151 && ((retry_count & 0x0f00) == CSC_SHELF_SW)
1152 && ((retry_count & 0xff) ==
1153 RC_STS_DEVICE_OFFLINE)) {
1154 metatask->status = CFI_MT_ERROR;
1155 metatask->taskinfo.bbrread.status =
1156 CFI_BBR_LUN_OFFLINE_RC;
1158 metatask->status = CFI_MT_ERROR;
1159 metatask->taskinfo.bbrread.status =
1163 #endif /* NEEDTOPORT */
1164 metatask->status = CFI_MT_ERROR;
1165 metatask->taskinfo.bbrread.status =
1171 metatask->status = CFI_MT_ERROR;
1172 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
/*
 * Completion handler for the per-LUN I/Os belonging to a metatask.  For
 * START/STOP metatasks it tallies successes/failures under the softc lock
 * and finishes the metatask once every LUN has reported; for BBR reads it
 * translates the result via cfi_metatask_bbr_errorparse() and finishes
 * immediately.  In all cases the lun_io is unlinked (and, in elided lines,
 * the io freed).
 */
1180 cfi_metatask_io_done(union ctl_io *io)
1182 struct cfi_lun_io *lun_io;
1183 struct cfi_metatask *metatask;
1184 struct cfi_softc *softc;
1185 struct cfi_lun *lun;
1187 lun_io = (struct cfi_lun_io *)
1188 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1193 metatask = lun_io->metatask;
1195 switch (metatask->tasktype) {
1196 case CFI_TASK_STARTUP:
1197 case CFI_TASK_SHUTDOWN: {
1198 int failed, done, is_start;
1202 if (metatask->tasktype == CFI_TASK_STARTUP)
/* Tally this LUN's outcome and decide, under the lock, whether all
 * LUNs have now reported (luns_complete + luns_failed >= total). */
1207 mtx_lock(&softc->lock);
1208 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
1209 metatask->taskinfo.startstop.luns_complete++;
1211 metatask->taskinfo.startstop.luns_failed++;
1214 if ((metatask->taskinfo.startstop.luns_complete +
1215 metatask->taskinfo.startstop.luns_failed) >=
1216 metatask->taskinfo.startstop.total_luns)
1219 mtx_unlock(&softc->lock);
1222 printf("%s: LUN %d %s request failed\n", __func__,
1223 lun_io->lun->lun_id, (is_start == 1) ? "start" :
1225 ctl_io_error_print(io, &lun_io->lun->inq_data);
/* Metatask result: error if any LUN failed, success otherwise. */
1228 if (metatask->taskinfo.startstop.luns_failed > 0)
1229 metatask->status = CFI_MT_ERROR;
1231 metatask->status = CFI_MT_SUCCESS;
1232 cfi_metatask_done(softc, metatask);
1234 mtx_lock(&softc->lock);
1235 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1236 mtx_unlock(&softc->lock);
1241 case CFI_TASK_BBRREAD: {
1243 * Translate the SCSI error into an enumeration.
1245 cfi_metatask_bbr_errorparse(metatask, io);
1247 mtx_lock(&softc->lock);
1248 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1249 mtx_unlock(&softc->lock);
1253 cfi_metatask_done(softc, metatask);
1258 * This shouldn't happen.
1260 mtx_lock(&softc->lock);
1261 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1262 mtx_unlock(&softc->lock);
/*
 * Completion handler for an error-recovery I/O (e.g. the LUN reset queued
 * by cfi_done()).  Logs both the original error and the recovery result,
 * unlinks the recovery io, then re-drives the original command: one retry
 * is charged, its status cleared, and it is requeued via ctl_queue().  If
 * the requeue fails the original io is unlinked and freed.
 * NOTE(review): excerpt -- the lun assignment and recovery-failure handling
 * are partially elided; the retry decrement at original line 1299 appears
 * unconditional here -- verify the elided context.
 */
1270 cfi_err_recovery_done(union ctl_io *io)
1272 struct cfi_lun_io *lun_io, *orig_lun_io;
1273 struct cfi_lun *lun;
1274 union ctl_io *orig_io;
1276 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1277 orig_lun_io = lun_io->orig_lun_io;
1278 orig_io = orig_lun_io->ctl_io;
1281 if (io->io_hdr.status != CTL_SUCCESS) {
1282 printf("%s: error recovery action failed. Original "
1283 "error:\n", __func__);
1285 ctl_io_error_print(orig_lun_io->ctl_io, &lun->inq_data);
1287 printf("%s: error from error recovery action:\n", __func__);
1289 ctl_io_error_print(io, &lun->inq_data);
1291 printf("%s: trying original command again...\n", __func__);
1294 mtx_lock(&lun->softc->lock);
1295 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1296 mtx_unlock(&lun->softc->lock);
1299 orig_io->io_hdr.retries--;
1300 orig_io->io_hdr.status = CTL_STATUS_NONE;
1302 if (ctl_queue(orig_io) != CTL_RETVAL_COMPLETE) {
1303 printf("%s: error returned from ctl_queue()!\n", __func__);
1304 STAILQ_REMOVE(&lun->io_list, orig_lun_io,
1306 ctl_free_io(orig_io);
/*
 * Generic done_function for metatask-owned I/Os: an I/O with no metatask
 * pointer has nothing waiting on it and is discarded (unlinked and, in an
 * elided line, freed); otherwise completion is forwarded to
 * cfi_metatask_io_done().
 */
1311 cfi_lun_io_done(union ctl_io *io)
1313 struct cfi_lun *lun;
1314 struct cfi_lun_io *lun_io;
1316 lun_io = (struct cfi_lun_io *)
1317 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1320 if (lun_io->metatask == NULL) {
1321 printf("%s: I/O has no metatask pointer, discarding\n",
1323 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1327 cfi_metatask_io_done(io);
/*
 * Public entry point: execute a metatask.  The metatask is queued on the
 * softc list and, if the port is offline, completed immediately with
 * CFI_MT_PORT_OFFLINE.  Otherwise:
 *
 *  - STARTUP/SHUTDOWN: pre-allocate one ctl_io per ready direct-access
 *    LUN onto a temporary list (so allocation failures are discovered
 *    before issuing anything), then issue a START STOP UNIT to each;
 *    completions are gathered by cfi_metatask_io_done().
 *  - BBRREAD: validate the target LUN and that the byte count is a
 *    multiple of the blocksize (shift when blocksize is a power of two,
 *    division otherwise), then issue a single read.
 *
 * NOTE(review): excerpt -- several early returns, unlock calls, and
 * variable initializations are elided; lock-coverage comments below are
 * based on what is visible.
 */
1331 cfi_action(struct cfi_metatask *metatask)
1333 struct cfi_softc *softc;
1335 softc = &fetd_internal_softc;
1337 mtx_lock(&softc->lock);
1339 STAILQ_INSERT_TAIL(&softc->metatask_list, metatask, links);
1341 if ((softc->flags & CFI_ONLINE) == 0) {
1342 mtx_unlock(&softc->lock);
1343 metatask->status = CFI_MT_PORT_OFFLINE;
1344 cfi_metatask_done(softc, metatask);
1347 mtx_unlock(&softc->lock);
1349 switch (metatask->tasktype) {
1350 case CFI_TASK_STARTUP:
1351 case CFI_TASK_SHUTDOWN: {
1353 int da_luns, ios_allocated, do_start;
1354 struct cfi_lun *lun;
1355 STAILQ_HEAD(, ctl_io_hdr) tmp_io_list;
1359 STAILQ_INIT(&tmp_io_list);
1361 if (metatask->tasktype == CFI_TASK_STARTUP)
/* First pass: count ready direct-access LUNs and pre-allocate an io
 * for each onto tmp_io_list. */
1366 mtx_lock(&softc->lock);
1367 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1368 if (lun->state != CFI_LUN_READY)
1371 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1374 io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
1377 STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
/* If allocation came up short, scale the task down to the ios we got. */
1382 if (ios_allocated < da_luns) {
1383 printf("%s: error allocating ctl_io for %s\n",
1384 __func__, (do_start == 1) ? "startup" :
1386 da_luns = ios_allocated;
1389 metatask->taskinfo.startstop.total_luns = da_luns;
/* Second pass: pop an io off tmp_io_list for each eligible LUN and
 * issue START STOP UNIT. */
1391 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1392 struct cfi_lun_io *lun_io;
1394 if (lun->state != CFI_LUN_READY)
1397 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1400 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1404 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr, ctl_io_hdr,
1407 ctl_scsi_start_stop(io,
1411 /*power_conditions*/
1414 /*ctl_tag_type*/ CTL_TAG_ORDERED,
1419 /*metatask*/ metatask,
1420 /*policy*/ CFI_ERR_HARD,
1422 /*orig_lun_io*/ NULL,
1423 /*done_function*/ cfi_lun_io_done);
1425 lun_io = (struct cfi_lun_io *) io->io_hdr.port_priv;
1427 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1429 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1430 printf("%s: error returned from ctl_queue()!\n",
1432 STAILQ_REMOVE(&lun->io_list, lun_io,
/* One fewer LUN will ever report completion. */
1435 metatask->taskinfo.startstop.total_luns--;
/* Any ios left over indicate a LUN-count mismatch between the two
 * passes; free them and complain. */
1439 if (STAILQ_FIRST(&tmp_io_list) != NULL) {
1440 printf("%s: error: tmp_io_list != NULL\n", __func__);
1441 for (io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1443 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list)) {
1444 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr,
1449 mtx_unlock(&softc->lock);
1453 case CFI_TASK_BBRREAD: {
1455 struct cfi_lun *lun;
1456 struct cfi_lun_io *lun_io;
1457 cfi_bbrread_status status;
1459 uint32_t num_blocks;
1461 status = CFI_BBR_SUCCESS;
1463 req_lun_num = metatask->taskinfo.bbrread.lun_num;
1465 mtx_lock(&softc->lock);
1466 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1467 if (lun->lun_id != req_lun_num)
1469 if (lun->state != CFI_LUN_READY) {
1470 status = CFI_BBR_LUN_UNCONFIG;
1477 status = CFI_BBR_NO_LUN;
1479 if (status != CFI_BBR_SUCCESS) {
1480 metatask->status = CFI_MT_ERROR;
1481 metatask->taskinfo.bbrread.status = status;
1482 mtx_unlock(&softc->lock);
1483 cfi_metatask_done(softc, metatask);
1488 * Convert the number of bytes given into blocks and check
1489 * that the number of bytes is a multiple of the blocksize.
1490 * CTL will verify that the LBA is okay.
1492 if (lun->blocksize_powerof2 != 0) {
1493 if ((metatask->taskinfo.bbrread.len &
1494 (lun->blocksize - 1)) != 0) {
1495 metatask->status = CFI_MT_ERROR;
1496 metatask->taskinfo.bbrread.status =
/* NOTE(review): unlike the other error paths in this case, no
 * mtx_unlock() is visible before this cfi_metatask_done() call --
 * possibly in an elided line; verify lock balance in full source. */
1498 cfi_metatask_done(softc, metatask);
1502 num_blocks = metatask->taskinfo.bbrread.len >>
1503 lun->blocksize_powerof2;
1506 * XXX KDM this could result in floating point
1507 * division, which isn't supported in the kernel on
1510 if ((metatask->taskinfo.bbrread.len %
1511 lun->blocksize) != 0) {
1512 metatask->status = CFI_MT_ERROR;
1513 metatask->taskinfo.bbrread.status =
1515 cfi_metatask_done(softc, metatask);
1520 * XXX KDM this could result in floating point
1521 * division in some cases.
1523 num_blocks = metatask->taskinfo.bbrread.len /
1528 io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref);
1530 metatask->status = CFI_MT_ERROR;
1531 metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
1532 mtx_unlock(&softc->lock);
1533 cfi_metatask_done(softc, metatask);
1538 * XXX KDM need to do a read capacity to get the blocksize
1541 ctl_scsi_read_write(io,
1543 /*data_len*/ metatask->taskinfo.bbrread.len,
1546 /*minimum_cdb_size*/ 0,
1547 /*lba*/ metatask->taskinfo.bbrread.lba,
1548 /*num_blocks*/ num_blocks,
1549 /*tag_type*/ CTL_TAG_SIMPLE,
1554 /*metatask*/ metatask,
1555 /*policy*/ CFI_ERR_SOFT,
1557 /*orig_lun_io*/ NULL,
1558 /*done_function*/ cfi_lun_io_done);
1560 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1562 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1564 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1565 printf("%s: error returned from ctl_queue()!\n",
1567 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1569 metatask->status = CFI_MT_ERROR;
1570 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1571 mtx_unlock(&softc->lock);
1572 cfi_metatask_done(softc, metatask);
1576 mtx_unlock(&softc->lock);
1580 panic("invalid metatask type %d", metatask->tasktype);
1581 break; /* NOTREACHED */
/*
 * Allocate a zeroed metatask from the UMA zone.  can_wait selects
 * M_WAITOK vs M_NOWAIT; returns NULL on failure (M_NOWAIT only).
 * The caller must release it with cfi_free_metatask().
 */
1585 struct cfi_metatask *
1586 cfi_alloc_metatask(int can_wait)
1588 struct cfi_metatask *metatask;
1589 struct cfi_softc *softc;
1591 softc = &fetd_internal_softc;
1593 metatask = uma_zalloc(cfi_metatask_zone,
1594 (can_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
1595 if (metatask == NULL)
1598 metatask->status = CFI_MT_NONE;
/* Release a metatask previously obtained from cfi_alloc_metatask(). */
1604 cfi_free_metatask(struct cfi_metatask *metatask)
1607 uma_zfree(cfi_metatask_zone, metatask);