2 * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.c#5 $
33 * CTL kernel internal frontend target driver. This allows kernel-level
34 * clients to send commands into CTL.
36 * This has elements of a FETD (e.g. it has to set tag numbers, initiator,
37 * port, target, and LUN) and elements of an initiator (LUN discovery and
38 * probing, error recovery, command initiation). Even though this has some
39 * initiator type elements, this is not intended to be a full fledged
40 * initiator layer. It is only intended to send a limited number of
41 * commands to a well known target layer.
43 * To be able to fulfill the role of a full initiator layer, it would need
44 * a whole lot more functionality.
46 * Author: Ken Merry <ken@FreeBSD.org>
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/kernel.h>
56 #include <sys/types.h>
57 #include <sys/malloc.h>
58 #include <sys/module.h>
60 #include <sys/mutex.h>
61 #include <sys/condvar.h>
62 #include <sys/queue.h>
64 #include <sys/sysctl.h>
65 #include <cam/scsi/scsi_all.h>
66 #include <cam/scsi/scsi_da.h>
67 #include <cam/ctl/ctl_io.h>
68 #include <cam/ctl/ctl.h>
69 #include <cam/ctl/ctl_frontend.h>
70 #include <cam/ctl/ctl_frontend_internal.h>
71 #include <cam/ctl/ctl_backend.h>
72 #include <cam/ctl/ctl_ioctl.h>
73 #include <cam/ctl/ctl_util.h>
74 #include <cam/ctl/ctl_ha.h>
75 #include <cam/ctl/ctl_private.h>
76 #include <cam/ctl/ctl_mem_pool.h>
77 #include <cam/ctl/ctl_debug.h>
78 #include <cam/ctl/ctl_scsi_all.h>
79 #include <cam/ctl/ctl_error.h>
83 * - overall metatask, different potential metatask types (e.g. forced
84 * shutdown, gentle shutdown)
85 * - forced shutdown metatask:
86 * - states: report luns, pending, done?
87 * - list of luns pending, with the relevant I/O for that lun attached.
88 * This would allow moving ahead on LUNs with no errors, and going
89 * into error recovery on LUNs with problems. Per-LUN states might
90 * include inquiry, stop/offline, done.
92 * Use LUN enable for LUN list instead of getting it manually? We'd still
93 * need inquiry data for each LUN.
95 * How to handle processor LUN w.r.t. found/stopped counts?
104 struct cfi_task_startstop {
110 /* XXX KDM add more fields here */
114 struct cfi_task_startstop startstop;
117 struct cfi_metatask {
118 cfi_tasktype tasktype;
119 cfi_mt_status status;
120 union cfi_taskinfo taskinfo;
121 struct ctl_mem_element *element;
123 STAILQ_ENTRY(cfi_metatask) links;
128 CFI_ERR_RETRY = 0x000,
129 CFI_ERR_FAIL = 0x001,
130 CFI_ERR_LUN_RESET = 0x002,
131 CFI_ERR_MASK = 0x0ff,
132 CFI_ERR_NO_DECREMENT = 0x100
142 CFI_LUN_READCAPACITY,
143 CFI_LUN_READCAPACITY_16,
148 struct ctl_id target_id;
150 struct scsi_inquiry_data inq_data;
153 int blocksize_powerof2;
154 uint32_t cur_tag_num;
156 struct ctl_mem_element *element;
157 struct cfi_softc *softc;
158 STAILQ_HEAD(, cfi_lun_io) io_list;
159 STAILQ_ENTRY(cfi_lun) links;
164 struct cfi_metatask *metatask;
165 cfi_error_policy policy;
166 void (*done_function)(union ctl_io *io);
167 union ctl_io *ctl_io;
168 struct cfi_lun_io *orig_lun_io;
169 STAILQ_ENTRY(cfi_lun_io) links;
178 struct ctl_frontend fe;
182 STAILQ_HEAD(, cfi_lun) lun_list;
183 STAILQ_HEAD(, cfi_metatask) metatask_list;
184 struct ctl_mem_pool lun_pool;
185 struct ctl_mem_pool metatask_pool;
188 MALLOC_DEFINE(M_CTL_CFI, "ctlcfi", "CTL CFI");
190 static struct cfi_softc fetd_internal_softc;
191 extern int ctl_disable;
194 void cfi_shutdown(void) __unused;
195 static void cfi_online(void *arg);
196 static void cfi_offline(void *arg);
197 static int cfi_targ_enable(void *arg, struct ctl_id targ_id);
198 static int cfi_targ_disable(void *arg, struct ctl_id targ_id);
199 static int cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
200 static int cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
201 static void cfi_datamove(union ctl_io *io);
202 static cfi_error_action cfi_checkcond_parse(union ctl_io *io,
203 struct cfi_lun_io *lun_io);
204 static cfi_error_action cfi_error_parse(union ctl_io *io,
205 struct cfi_lun_io *lun_io);
206 static void cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
207 struct cfi_metatask *metatask, cfi_error_policy policy,
208 int retries, struct cfi_lun_io *orig_lun_io,
209 void (*done_function)(union ctl_io *io));
210 static void cfi_done(union ctl_io *io);
211 static void cfi_lun_probe_done(union ctl_io *io);
212 static void cfi_lun_probe(struct cfi_lun *lun, int have_lock);
213 static void cfi_metatask_done(struct cfi_softc *softc,
214 struct cfi_metatask *metatask);
215 static void cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask,
217 static void cfi_metatask_io_done(union ctl_io *io);
218 static void cfi_err_recovery_done(union ctl_io *io);
219 static void cfi_lun_io_done(union ctl_io *io);
221 static int cfi_module_event_handler(module_t, int /*modeventtype_t*/, void *);
223 static moduledata_t cfi_moduledata = {
225 cfi_module_event_handler,
229 DECLARE_MODULE(ctlcfi, cfi_moduledata, SI_SUB_CONFIGURE, SI_ORDER_FOURTH);
230 MODULE_VERSION(ctlcfi, 1);
231 MODULE_DEPEND(ctlcfi, ctl, 1, 1, 1);
236 struct cfi_softc *softc;
237 struct ctl_frontend *fe;
240 softc = &fetd_internal_softc;
246 /* If we're disabled, don't initialize */
247 if (ctl_disable != 0)
250 if (sizeof(struct cfi_lun_io) > CTL_PORT_PRIV_SIZE) {
251 printf("%s: size of struct cfi_lun_io %zd > "
252 "CTL_PORT_PRIV_SIZE %d\n", __func__,
253 sizeof(struct cfi_lun_io),
256 memset(softc, 0, sizeof(softc));
258 mtx_init(&softc->lock, "CTL frontend mutex", NULL, MTX_DEF);
259 softc->flags |= CTL_FLAG_MASTER_SHELF;
261 STAILQ_INIT(&softc->lun_list);
262 STAILQ_INIT(&softc->metatask_list);
263 sprintf(softc->fe_name, "CTL internal");
264 fe->port_type = CTL_PORT_INTERNAL;
265 fe->num_requested_ctl_io = 100;
266 fe->port_name = softc->fe_name;
267 fe->port_online = cfi_online;
268 fe->port_offline = cfi_offline;
269 fe->onoff_arg = softc;
270 fe->targ_enable = cfi_targ_enable;
271 fe->targ_disable = cfi_targ_disable;
272 fe->lun_enable = cfi_lun_enable;
273 fe->lun_disable = cfi_lun_disable;
274 fe->targ_lun_arg = softc;
275 fe->fe_datamove = cfi_datamove;
276 fe->fe_done = cfi_done;
277 fe->max_targets = 15;
278 fe->max_target_id = 15;
280 if (ctl_frontend_register(fe, (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0)
282 printf("%s: internal frontend registration failed\n", __func__);
287 if (ctl_init_mem_pool(&softc->lun_pool,
288 sizeof(struct cfi_lun),
289 CTL_MEM_POOL_PERM_GROW, /*grow_inc*/ 3,
290 /* initial_pool_size */ CTL_MAX_LUNS) != 0) {
291 printf("%s: can't initialize LUN memory pool\n", __func__);
296 if (ctl_init_mem_pool(&softc->metatask_pool,
297 sizeof(struct cfi_metatask),
298 CTL_MEM_POOL_PERM_GROW, /*grow_inc*/ 3,
299 /*initial_pool_size*/ 10) != 0) {
300 printf("%s: can't initialize metatask memory pool\n", __func__);
312 ctl_shrink_mem_pool(&softc->metatask_pool);
315 ctl_shrink_mem_pool(&softc->lun_pool);
318 ctl_frontend_deregister(fe);
330 struct cfi_softc *softc;
332 softc = &fetd_internal_softc;
335 * XXX KDM need to clear out any I/O pending on each LUN.
337 if (ctl_frontend_deregister(&softc->fe) != 0)
338 printf("%s: ctl_frontend_deregister() failed\n", __func__);
340 if (ctl_shrink_mem_pool(&softc->lun_pool) != 0)
341 printf("%s: error shrinking LUN pool\n", __func__);
343 if (ctl_shrink_mem_pool(&softc->metatask_pool) != 0)
344 printf("%s: error shrinking LUN pool\n", __func__);
348 cfi_module_event_handler(module_t mod, int what, void *arg)
362 cfi_online(void *arg)
364 struct cfi_softc *softc;
367 softc = (struct cfi_softc *)arg;
369 softc->flags |= CFI_ONLINE;
372 * Go through and kick off the probe for each lun. Should we check
373 * the LUN flags here to determine whether or not to probe it?
375 mtx_lock(&softc->lock);
376 STAILQ_FOREACH(lun, &softc->lun_list, links)
377 cfi_lun_probe(lun, /*have_lock*/ 1);
378 mtx_unlock(&softc->lock);
382 cfi_offline(void *arg)
384 struct cfi_softc *softc;
386 softc = (struct cfi_softc *)arg;
388 softc->flags &= ~CFI_ONLINE;
392 cfi_targ_enable(void *arg, struct ctl_id targ_id)
398 cfi_targ_disable(void *arg, struct ctl_id targ_id)
404 cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
406 struct ctl_mem_element *element;
407 struct cfi_softc *softc;
411 softc = (struct cfi_softc *)arg;
414 mtx_lock(&softc->lock);
415 STAILQ_FOREACH(lun, &softc->lun_list, links) {
416 if ((lun->target_id.id == target_id.id)
417 && (lun->lun_id == lun_id)) {
422 mtx_unlock(&softc->lock);
425 * If we already have this target/LUN, there is no reason to add
426 * it to our lists again.
431 element = ctl_alloc_mem_element(&softc->lun_pool, /*can_wait*/ 0);
433 if (element == NULL) {
434 printf("%s: unable to allocate LUN structure\n", __func__);
438 lun = (struct cfi_lun *)element->bytes;
440 lun->element = element;
441 lun->target_id = target_id;
442 lun->lun_id = lun_id;
443 lun->cur_tag_num = 0;
444 lun->state = CFI_LUN_INQUIRY;
446 STAILQ_INIT(&lun->io_list);
448 mtx_lock(&softc->lock);
449 STAILQ_INSERT_TAIL(&softc->lun_list, lun, links);
450 mtx_unlock(&softc->lock);
452 cfi_lun_probe(lun, /*have_lock*/ 0);
458 cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
460 struct cfi_softc *softc;
464 softc = (struct cfi_softc *)arg;
469 * XXX KDM need to do an invalidate and then a free when any
470 * pending I/O has completed. Or do we? CTL won't free a LUN
471 * while any I/O is pending. So we won't get this notification
472 * unless any I/O we have pending on a LUN has completed.
474 mtx_lock(&softc->lock);
475 STAILQ_FOREACH(lun, &softc->lun_list, links) {
476 if ((lun->target_id.id == target_id.id)
477 && (lun->lun_id == lun_id)) {
483 STAILQ_REMOVE(&softc->lun_list, lun, cfi_lun, links);
485 mtx_unlock(&softc->lock);
488 printf("%s: can't find target %ju lun %d\n", __func__,
489 (uintmax_t)target_id.id, lun_id);
493 ctl_free_mem_element(lun->element);
499 * XXX KDM run this inside a thread, or inside the caller's context?
502 cfi_datamove(union ctl_io *io)
504 struct ctl_sg_entry *ext_sglist, *kern_sglist;
505 struct ctl_sg_entry ext_entry, kern_entry;
506 int ext_sglen, ext_sg_entries, kern_sg_entries;
507 int ext_sg_start, ext_offset;
508 int len_to_copy, len_copied;
509 int kern_watermark, ext_watermark;
510 int ext_sglist_malloced;
511 struct ctl_scsiio *ctsio;
514 ext_sglist_malloced = 0;
519 CTL_DEBUG_PRINT(("%s\n", __func__));
524 * If this is the case, we're probably doing a BBR read and don't
525 * actually need to transfer the data. This will effectively
526 * bit-bucket the data.
528 if (ctsio->ext_data_ptr == NULL)
532 * To simplify things here, if we have a single buffer, stick it in
533 * a S/G entry and just make it a single entry S/G list.
535 if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
538 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
541 * XXX KDM GFP_KERNEL, don't know what the caller's context
542 * is. Need to figure that out.
544 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL_CFI,
546 if (ext_sglist == NULL) {
547 ctl_set_internal_failure(ctsio,
552 ext_sglist_malloced = 1;
553 if (memcpy(ext_sglist, ctsio->ext_data_ptr, ext_sglen) != 0) {
554 ctl_set_internal_failure(ctsio,
559 ext_sg_entries = ctsio->ext_sg_entries;
561 for (i = 0; i < ext_sg_entries; i++) {
562 if ((len_seen + ext_sglist[i].len) >=
563 ctsio->ext_data_filled) {
565 ext_offset = ctsio->ext_data_filled - len_seen;
568 len_seen += ext_sglist[i].len;
571 ext_sglist = &ext_entry;
572 ext_sglist->addr = ctsio->ext_data_ptr;
573 ext_sglist->len = ctsio->ext_data_len;
576 ext_offset = ctsio->ext_data_filled;
579 if (ctsio->kern_sg_entries > 0) {
580 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
581 kern_sg_entries = ctsio->kern_sg_entries;
583 kern_sglist = &kern_entry;
584 kern_sglist->addr = ctsio->kern_data_ptr;
585 kern_sglist->len = ctsio->kern_data_len;
591 ext_watermark = ext_offset;
593 for (i = ext_sg_start, j = 0;
594 i < ext_sg_entries && j < kern_sg_entries;) {
595 uint8_t *ext_ptr, *kern_ptr;
597 len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
598 kern_sglist[j].len - kern_watermark);
600 ext_ptr = (uint8_t *)ext_sglist[i].addr;
601 ext_ptr = ext_ptr + ext_watermark;
602 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
606 panic("need to implement bus address support");
608 kern_ptr = bus_to_virt(kern_sglist[j].addr);
611 kern_ptr = (uint8_t *)kern_sglist[j].addr;
612 kern_ptr = kern_ptr + kern_watermark;
614 kern_watermark += len_to_copy;
615 ext_watermark += len_to_copy;
617 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
619 CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
620 __func__, len_to_copy));
621 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
623 memcpy(ext_ptr, kern_ptr, len_to_copy);
625 CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
626 __func__, len_to_copy));
627 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
629 memcpy(kern_ptr, ext_ptr, len_to_copy);
632 len_copied += len_to_copy;
634 if (ext_sglist[i].len == ext_watermark) {
639 if (kern_sglist[j].len == kern_watermark) {
645 ctsio->ext_data_filled += len_copied;
647 CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
648 __func__, ext_sg_entries, kern_sg_entries));
649 CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
650 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
653 /* XXX KDM set residual?? */
656 if (ext_sglist_malloced != 0)
657 free(ext_sglist, M_CTL_CFI);
659 io->scsiio.be_move_done(io);
665 * For any sort of check condition, busy, etc., we just retry. We do not
666 * decrement the retry count for unit attention type errors. These are
667 * normal, and we want to save the retry count for "real" errors. Otherwise,
668 * we could end up with situations where a command will succeed in some
669 * situations and fail in others, depending on whether a unit attention is
670 * pending. Also, some of our error recovery actions, most notably the
671 * LUN reset action, will cause a unit attention.
673 * We can add more detail here later if necessary.
675 static cfi_error_action
676 cfi_checkcond_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
678 cfi_error_action error_action;
679 int error_code, sense_key, asc, ascq;
682 * Default to retrying the command.
684 error_action = CFI_ERR_RETRY;
686 scsi_extract_sense_len(&io->scsiio.sense_data,
687 io->scsiio.sense_len,
694 switch (error_code) {
695 case SSD_DEFERRED_ERROR:
696 case SSD_DESC_DEFERRED_ERROR:
697 error_action |= CFI_ERR_NO_DECREMENT;
699 case SSD_CURRENT_ERROR:
700 case SSD_DESC_CURRENT_ERROR:
703 case SSD_KEY_UNIT_ATTENTION:
704 error_action |= CFI_ERR_NO_DECREMENT;
706 case SSD_KEY_HARDWARE_ERROR:
708 * This is our generic "something bad happened"
709 * error code. It often isn't recoverable.
711 if ((asc == 0x44) && (ascq == 0x00))
712 error_action = CFI_ERR_FAIL;
714 case SSD_KEY_NOT_READY:
716 * If the LUN is powered down, there likely isn't
717 * much point in retrying right now.
719 if ((asc == 0x04) && (ascq == 0x02))
720 error_action = CFI_ERR_FAIL;
722 * If the LUN is offline, there probably isn't much
723 * point in retrying, either.
725 if ((asc == 0x04) && (ascq == 0x03))
726 error_action = CFI_ERR_FAIL;
732 return (error_action);
735 static cfi_error_action
736 cfi_error_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
738 cfi_error_action error_action;
740 error_action = CFI_ERR_RETRY;
742 switch (io->io_hdr.io_type) {
744 switch (io->io_hdr.status & CTL_STATUS_MASK) {
746 switch (io->scsiio.scsi_status) {
747 case SCSI_STATUS_RESERV_CONFLICT:
749 * For a reservation conflict, we'll usually
750 * want the hard error recovery policy, so
751 * we'll reset the LUN.
753 if (lun_io->policy == CFI_ERR_HARD)
760 case SCSI_STATUS_CHECK_COND:
762 error_action = cfi_checkcond_parse(io, lun_io);
767 error_action = CFI_ERR_RETRY;
773 * In theory task management commands shouldn't fail...
775 error_action = CFI_ERR_RETRY;
778 printf("%s: invalid ctl_io type %d\n", __func__,
780 panic("%s: invalid ctl_io type %d\n", __func__,
785 return (error_action);
789 cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
790 struct cfi_metatask *metatask, cfi_error_policy policy, int retries,
791 struct cfi_lun_io *orig_lun_io,
792 void (*done_function)(union ctl_io *io))
794 struct cfi_lun_io *lun_io;
796 io->io_hdr.nexus.initid.id = 7;
797 io->io_hdr.nexus.targ_port = lun->softc->fe.targ_port;
798 io->io_hdr.nexus.targ_target.id = lun->target_id.id;
799 io->io_hdr.nexus.targ_lun = lun->lun_id;
800 io->io_hdr.retries = retries;
801 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
802 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = lun_io;
804 lun_io->metatask = metatask;
806 lun_io->policy = policy;
807 lun_io->orig_lun_io = orig_lun_io;
808 lun_io->done_function = done_function;
810 * We only set the tag number for SCSI I/Os. For task management
811 * commands, the tag number is only really needed for aborts, so
812 * the caller can set it if necessary.
814 switch (io->io_hdr.io_type) {
816 io->scsiio.tag_num = lun->cur_tag_num++;
825 cfi_done(union ctl_io *io)
827 struct cfi_lun_io *lun_io;
828 struct cfi_softc *softc;
831 lun_io = (struct cfi_lun_io *)
832 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
838 * Very minimal retry logic. We basically retry if we got an error
839 * back, and the retry count is greater than 0. If we ever want
840 * more sophisticated initiator type behavior, the CAM error
841 * recovery code in ../common might be helpful.
843 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
844 && (io->io_hdr.retries > 0)) {
845 ctl_io_status old_status;
846 cfi_error_action error_action;
848 error_action = cfi_error_parse(io, lun_io);
850 switch (error_action & CFI_ERR_MASK) {
853 break; /* NOTREACHED */
854 case CFI_ERR_LUN_RESET: {
855 union ctl_io *new_io;
856 struct cfi_lun_io *new_lun_io;
858 new_io = ctl_alloc_io(softc->fe.ctl_pool_ref);
859 if (new_io == NULL) {
860 printf("%s: unable to allocate ctl_io for "
861 "error recovery\n", __func__);
866 new_io->io_hdr.io_type = CTL_IO_TASK;
867 new_io->taskio.task_action = CTL_TASK_LUN_RESET;
872 /*policy*/ CFI_ERR_SOFT,
874 /*orig_lun_io*/lun_io,
875 /*done_function*/ cfi_err_recovery_done);
878 new_lun_io = (struct cfi_lun_io *)
879 new_io->io_hdr.port_priv;
881 mtx_lock(&lun->softc->lock);
882 STAILQ_INSERT_TAIL(&lun->io_list, new_lun_io, links);
883 mtx_unlock(&lun->softc->lock);
890 if ((error_action & CFI_ERR_NO_DECREMENT) == 0)
891 io->io_hdr.retries--;
895 old_status = io->io_hdr.status;
896 io->io_hdr.status = CTL_STATUS_NONE;
898 io->io_hdr.flags &= ~CTL_FLAG_ALREADY_DONE;
900 io->io_hdr.flags &= ~CTL_FLAG_ABORT;
901 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
903 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
904 printf("%s: error returned from ctl_queue()!\n",
906 io->io_hdr.status = old_status;
911 lun_io->done_function(io);
915 cfi_lun_probe_done(union ctl_io *io)
918 struct cfi_lun_io *lun_io;
920 lun_io = (struct cfi_lun_io *)
921 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
924 switch (lun->state) {
925 case CFI_LUN_INQUIRY: {
926 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
927 /* print out something here?? */
928 printf("%s: LUN %d probe failed because inquiry "
929 "failed\n", __func__, lun->lun_id);
930 ctl_io_error_print(io, NULL);
933 if (SID_TYPE(&lun->inq_data) != T_DIRECT) {
936 lun->state = CFI_LUN_READY;
937 ctl_scsi_path_string(io, path_str,
939 printf("%s", path_str);
940 scsi_print_inquiry(&lun->inq_data);
942 lun->state = CFI_LUN_READCAPACITY;
943 cfi_lun_probe(lun, /*have_lock*/ 0);
946 mtx_lock(&lun->softc->lock);
947 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
948 mtx_unlock(&lun->softc->lock);
952 case CFI_LUN_READCAPACITY:
953 case CFI_LUN_READCAPACITY_16: {
960 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
961 printf("%s: LUN %d probe failed because READ CAPACITY "
962 "failed\n", __func__, lun->lun_id);
963 ctl_io_error_print(io, NULL);
966 if (lun->state == CFI_LUN_READCAPACITY) {
967 struct scsi_read_capacity_data *rdcap;
969 rdcap = (struct scsi_read_capacity_data *)
970 io->scsiio.ext_data_ptr;
972 maxlba = scsi_4btoul(rdcap->addr);
973 blocksize = scsi_4btoul(rdcap->length);
974 if (blocksize == 0) {
975 printf("%s: LUN %d has invalid "
976 "blocksize 0, probe aborted\n",
977 __func__, lun->lun_id);
978 } else if (maxlba == 0xffffffff) {
979 lun->state = CFI_LUN_READCAPACITY_16;
980 cfi_lun_probe(lun, /*have_lock*/ 0);
982 lun->state = CFI_LUN_READY;
984 struct scsi_read_capacity_data_long *rdcap_long;
987 scsi_read_capacity_data_long *)
988 io->scsiio.ext_data_ptr;
989 maxlba = scsi_8btou64(rdcap_long->addr);
990 blocksize = scsi_4btoul(rdcap_long->length);
992 if (blocksize == 0) {
993 printf("%s: LUN %d has invalid "
994 "blocksize 0, probe aborted\n",
995 __func__, lun->lun_id);
997 lun->state = CFI_LUN_READY;
1001 if (lun->state == CFI_LUN_READY) {
1004 lun->num_blocks = maxlba + 1;
1005 lun->blocksize = blocksize;
1008 * If this is true, the blocksize is a power of 2.
1009 * We already checked for 0 above.
1011 if (((blocksize - 1) & blocksize) == 0) {
1014 for (i = 0; i < 32; i++) {
1015 if ((blocksize & (1 << i)) != 0) {
1016 lun->blocksize_powerof2 = i;
1021 ctl_scsi_path_string(io, path_str,sizeof(path_str));
1022 printf("%s", path_str);
1023 scsi_print_inquiry(&lun->inq_data);
1024 printf("%s %ju blocks, blocksize %d\n", path_str,
1025 (uintmax_t)maxlba + 1, blocksize);
1027 mtx_lock(&lun->softc->lock);
1028 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1029 mtx_unlock(&lun->softc->lock);
1030 free(io->scsiio.ext_data_ptr, M_CTL_CFI);
1036 mtx_lock(&lun->softc->lock);
1037 /* How did we get here?? */
1038 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1039 mtx_unlock(&lun->softc->lock);
1046 cfi_lun_probe(struct cfi_lun *lun, int have_lock)
1050 mtx_lock(&lun->softc->lock);
1051 if ((lun->softc->flags & CFI_ONLINE) == 0) {
1053 mtx_unlock(&lun->softc->lock);
1057 mtx_unlock(&lun->softc->lock);
1059 switch (lun->state) {
1060 case CFI_LUN_INQUIRY: {
1061 struct cfi_lun_io *lun_io;
1064 io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1066 printf("%s: unable to alloc ctl_io for target %ju "
1067 "lun %d probe\n", __func__,
1068 (uintmax_t)lun->target_id.id, lun->lun_id);
1071 ctl_scsi_inquiry(io,
1072 /*data_ptr*/(uint8_t *)&lun->inq_data,
1073 /*data_len*/ sizeof(lun->inq_data),
1076 /*tag_type*/ CTL_TAG_SIMPLE,
1082 /*policy*/ CFI_ERR_SOFT,
1084 /*orig_lun_io*/ NULL,
1086 cfi_lun_probe_done);
1088 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1091 mtx_lock(&lun->softc->lock);
1092 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1094 mtx_unlock(&lun->softc->lock);
1096 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1097 printf("%s: error returned from ctl_queue()!\n",
1099 STAILQ_REMOVE(&lun->io_list, lun_io,
1105 case CFI_LUN_READCAPACITY:
1106 case CFI_LUN_READCAPACITY_16: {
1107 struct cfi_lun_io *lun_io;
1111 io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1113 printf("%s: unable to alloc ctl_io for target %ju "
1114 "lun %d probe\n", __func__,
1115 (uintmax_t)lun->target_id.id, lun->lun_id);
1119 dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
1120 M_CTL_CFI, M_NOWAIT);
1121 if (dataptr == NULL) {
1122 printf("%s: unable to allocate SCSI read capacity "
1123 "buffer for target %ju lun %d\n", __func__,
1124 (uintmax_t)lun->target_id.id, lun->lun_id);
1127 if (lun->state == CFI_LUN_READCAPACITY) {
1128 ctl_scsi_read_capacity(io,
1129 /*data_ptr*/ dataptr,
1131 sizeof(struct scsi_read_capacity_data_long),
1135 /*tag_type*/ CTL_TAG_SIMPLE,
1138 ctl_scsi_read_capacity_16(io,
1139 /*data_ptr*/ dataptr,
1141 sizeof(struct scsi_read_capacity_data_long),
1145 /*tag_type*/ CTL_TAG_SIMPLE,
1151 /*policy*/ CFI_ERR_SOFT,
1153 /*orig_lun_io*/ NULL,
1154 /*done_function*/ cfi_lun_probe_done);
1156 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1159 mtx_lock(&lun->softc->lock);
1160 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1162 mtx_unlock(&lun->softc->lock);
1164 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1165 printf("%s: error returned from ctl_queue()!\n",
1167 STAILQ_REMOVE(&lun->io_list, lun_io,
1169 free(dataptr, M_CTL_CFI);
1176 /* Why were we called? */
1182 cfi_metatask_done(struct cfi_softc *softc, struct cfi_metatask *metatask)
1184 mtx_lock(&softc->lock);
1185 STAILQ_REMOVE(&softc->metatask_list, metatask, cfi_metatask, links);
1186 mtx_unlock(&softc->lock);
1189 * Return status to the caller. Caller allocated storage, and is
1190 * responsible for calling cfi_free_metatask to release it once
1191 * they've seen the status.
1193 metatask->callback(metatask->callback_arg, metatask);
1197 cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask, union ctl_io *io)
1199 int error_code, sense_key, asc, ascq;
1201 if (metatask->tasktype != CFI_TASK_BBRREAD)
1204 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
1205 metatask->status = CFI_MT_SUCCESS;
1206 metatask->taskinfo.bbrread.status = CFI_BBR_SUCCESS;
1210 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) {
1211 metatask->status = CFI_MT_ERROR;
1212 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1216 metatask->taskinfo.bbrread.scsi_status = io->scsiio.scsi_status;
1217 memcpy(&metatask->taskinfo.bbrread.sense_data, &io->scsiio.sense_data,
1218 ctl_min(sizeof(metatask->taskinfo.bbrread.sense_data),
1219 sizeof(io->scsiio.sense_data)));
1221 if (io->scsiio.scsi_status == SCSI_STATUS_RESERV_CONFLICT) {
1222 metatask->status = CFI_MT_ERROR;
1223 metatask->taskinfo.bbrread.status = CFI_BBR_RESERV_CONFLICT;
1227 if (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) {
1228 metatask->status = CFI_MT_ERROR;
1229 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1233 scsi_extract_sense_len(&io->scsiio.sense_data,
1234 io->scsiio.sense_len,
1241 switch (error_code) {
1242 case SSD_DEFERRED_ERROR:
1243 case SSD_DESC_DEFERRED_ERROR:
1244 metatask->status = CFI_MT_ERROR;
1245 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1247 case SSD_CURRENT_ERROR:
1248 case SSD_DESC_CURRENT_ERROR:
1250 struct scsi_sense_data *sense;
1252 sense = &io->scsiio.sense_data;
1254 if ((asc == 0x04) && (ascq == 0x02)) {
1255 metatask->status = CFI_MT_ERROR;
1256 metatask->taskinfo.bbrread.status = CFI_BBR_LUN_STOPPED;
1257 } else if ((asc == 0x04) && (ascq == 0x03)) {
1258 metatask->status = CFI_MT_ERROR;
1259 metatask->taskinfo.bbrread.status =
1260 CFI_BBR_LUN_OFFLINE_CTL;
1261 } else if ((asc == 0x44) && (ascq == 0x00)) {
1263 if (sense->sense_key_spec[0] & SSD_SCS_VALID) {
1264 uint16_t retry_count;
1266 retry_count = sense->sense_key_spec[1] << 8 |
1267 sense->sense_key_spec[2];
1268 if (((retry_count & 0xf000) == CSC_RAIDCORE)
1269 && ((retry_count & 0x0f00) == CSC_SHELF_SW)
1270 && ((retry_count & 0xff) ==
1271 RC_STS_DEVICE_OFFLINE)) {
1272 metatask->status = CFI_MT_ERROR;
1273 metatask->taskinfo.bbrread.status =
1274 CFI_BBR_LUN_OFFLINE_RC;
1276 metatask->status = CFI_MT_ERROR;
1277 metatask->taskinfo.bbrread.status =
1281 #endif /* NEEDTOPORT */
1282 metatask->status = CFI_MT_ERROR;
1283 metatask->taskinfo.bbrread.status =
1289 metatask->status = CFI_MT_ERROR;
1290 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1298 cfi_metatask_io_done(union ctl_io *io)
1300 struct cfi_lun_io *lun_io;
1301 struct cfi_metatask *metatask;
1302 struct cfi_softc *softc;
1303 struct cfi_lun *lun;
1305 lun_io = (struct cfi_lun_io *)
1306 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1311 metatask = lun_io->metatask;
1313 switch (metatask->tasktype) {
1314 case CFI_TASK_STARTUP:
1315 case CFI_TASK_SHUTDOWN: {
1316 int failed, done, is_start;
1320 if (metatask->tasktype == CFI_TASK_STARTUP)
1325 mtx_lock(&softc->lock);
1326 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
1327 metatask->taskinfo.startstop.luns_complete++;
1329 metatask->taskinfo.startstop.luns_failed++;
1332 if ((metatask->taskinfo.startstop.luns_complete +
1333 metatask->taskinfo.startstop.luns_failed) >=
1334 metatask->taskinfo.startstop.total_luns)
1337 mtx_unlock(&softc->lock);
1340 printf("%s: LUN %d %s request failed\n", __func__,
1341 lun_io->lun->lun_id, (is_start == 1) ? "start" :
1343 ctl_io_error_print(io, &lun_io->lun->inq_data);
1346 if (metatask->taskinfo.startstop.luns_failed > 0)
1347 metatask->status = CFI_MT_ERROR;
1349 metatask->status = CFI_MT_SUCCESS;
1350 cfi_metatask_done(softc, metatask);
1352 mtx_lock(&softc->lock);
1353 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1354 mtx_unlock(&softc->lock);
1359 case CFI_TASK_BBRREAD: {
1361 * Translate the SCSI error into an enumeration.
1363 cfi_metatask_bbr_errorparse(metatask, io);
1365 mtx_lock(&softc->lock);
1366 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1367 mtx_unlock(&softc->lock);
1371 cfi_metatask_done(softc, metatask);
1376 * This shouldn't happen.
1378 mtx_lock(&softc->lock);
1379 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1380 mtx_unlock(&softc->lock);
1388 cfi_err_recovery_done(union ctl_io *io)
1390 struct cfi_lun_io *lun_io, *orig_lun_io;
1391 struct cfi_lun *lun;
1392 union ctl_io *orig_io;
1394 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1395 orig_lun_io = lun_io->orig_lun_io;
1396 orig_io = orig_lun_io->ctl_io;
1399 if (io->io_hdr.status != CTL_SUCCESS) {
1400 printf("%s: error recovery action failed. Original "
1401 "error:\n", __func__);
1403 ctl_io_error_print(orig_lun_io->ctl_io, &lun->inq_data);
1405 printf("%s: error from error recovery action:\n", __func__);
1407 ctl_io_error_print(io, &lun->inq_data);
1409 printf("%s: trying original command again...\n", __func__);
1412 mtx_lock(&lun->softc->lock);
1413 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1414 mtx_unlock(&lun->softc->lock);
1417 orig_io->io_hdr.retries--;
1418 orig_io->io_hdr.status = CTL_STATUS_NONE;
1420 if (ctl_queue(orig_io) != CTL_RETVAL_COMPLETE) {
1421 printf("%s: error returned from ctl_queue()!\n", __func__);
1422 STAILQ_REMOVE(&lun->io_list, orig_lun_io,
1424 ctl_free_io(orig_io);
1429 cfi_lun_io_done(union ctl_io *io)
1431 struct cfi_lun *lun;
1432 struct cfi_lun_io *lun_io;
1434 lun_io = (struct cfi_lun_io *)
1435 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1438 if (lun_io->metatask == NULL) {
1439 printf("%s: I/O has no metatask pointer, discarding\n",
1441 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1445 cfi_metatask_io_done(io);
/*
 * Execute a metatask.  The metatask is queued on the softc's
 * metatask_list and, depending on metatask->tasktype, the matching
 * SCSI I/O is built and submitted through ctl_queue():
 *   CFI_TASK_STARTUP/SHUTDOWN - a START STOP UNIT per direct-access LUN
 *   CFI_TASK_BBRREAD          - a read of the requested LBA range
 * Failures are reported by setting metatask->status and calling
 * cfi_metatask_done().
 *
 * NOTE(review): lossy extract -- many original lines are missing
 * (returns, closing braces, some error paths), so control flow below
 * is only partially visible.  Also note this function is defined with
 * a single 'metatask' parameter but is called elsewhere in this file
 * as cfi_action(softc, metatask); confirm the current signature
 * against the full file.
 */
1449 cfi_action(struct cfi_metatask *metatask)
1451 struct cfi_softc *softc;
/* Single global softc for the internal frontend. */
1453 softc = &fetd_internal_softc;
1455 mtx_lock(&softc->lock);
/* Track the metatask while it is in flight. */
1457 STAILQ_INSERT_TAIL(&softc->metatask_list, metatask, links);
/* Port offline: fail the metatask immediately. */
1459 if ((softc->flags & CFI_ONLINE) == 0) {
1460 mtx_unlock(&softc->lock);
1461 metatask->status = CFI_MT_PORT_OFFLINE;
1462 cfi_metatask_done(softc, metatask);
1465 mtx_unlock(&softc->lock);
1467 switch (metatask->tasktype) {
1468 case CFI_TASK_STARTUP:
1469 case CFI_TASK_SHUTDOWN: {
1471 int da_luns, ios_allocated, do_start;
1472 struct cfi_lun *lun;
1473 STAILQ_HEAD(, ctl_io_hdr) tmp_io_list;
/* Stage the ctl_io allocations on a temporary list first. */
1477 STAILQ_INIT(&tmp_io_list);
1479 if (metatask->tasktype == CFI_TASK_STARTUP)
/*
 * Pass 1: allocate one ctl_io per ready direct-access LUN.
 * (The da_luns/ios_allocated counting lines fall in the missing
 * line ranges.)
 */
1484 mtx_lock(&softc->lock);
1485 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1486 if (lun->state != CFI_LUN_READY)
/* Only direct-access (disk-type) LUNs get a start/stop. */
1489 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1492 io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1495 STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
/* Couldn't allocate one I/O per LUN: proceed with what we got. */
1500 if (ios_allocated < da_luns) {
1501 printf("%s: error allocating ctl_io for %s\n",
1502 __func__, (do_start == 1) ? "startup" :
1504 da_luns = ios_allocated;
1507 metatask->taskinfo.startstop.total_luns = da_luns;
/* Pass 2: build and queue a START STOP UNIT for each such LUN. */
1509 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1510 struct cfi_lun_io *lun_io;
1512 if (lun->state != CFI_LUN_READY)
1515 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1518 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1522 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr, ctl_io_hdr,
1525 ctl_scsi_start_stop(io,
1529 /*power_conditions*/
1532 /*ctl_tag_type*/ CTL_TAG_ORDERED,
/* Associate this I/O with the metatask; hard error policy. */
1537 /*metatask*/ metatask,
1538 /*policy*/ CFI_ERR_HARD,
1540 /*orig_lun_io*/ NULL,
1541 /*done_function*/ cfi_lun_io_done);
1543 lun_io = (struct cfi_lun_io *) io->io_hdr.port_priv;
1545 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1547 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1548 printf("%s: error returned from ctl_queue()!\n",
/* Queue failed: untrack and stop counting this LUN. */
1550 STAILQ_REMOVE(&lun->io_list, lun_io,
1553 metatask->taskinfo.startstop.total_luns--;
/* Any leftover staged I/Os indicate a counting bug; free them. */
1557 if (STAILQ_FIRST(&tmp_io_list) != NULL) {
1558 printf("%s: error: tmp_io_list != NULL\n", __func__);
1559 for (io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1561 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list)) {
1562 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr,
1567 mtx_unlock(&softc->lock);
1571 case CFI_TASK_BBRREAD: {
1573 struct cfi_lun *lun;
1574 struct cfi_lun_io *lun_io;
1575 cfi_bbrread_status status;
1577 uint32_t num_blocks;
1579 status = CFI_BBR_SUCCESS;
1581 req_lun_num = metatask->taskinfo.bbrread.lun_num;
/* Locate the requested LUN and check it is usable. */
1583 mtx_lock(&softc->lock);
1584 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1585 if (lun->lun_id != req_lun_num)
1587 if (lun->state != CFI_LUN_READY) {
1588 status = CFI_BBR_LUN_UNCONFIG;
1595 status = CFI_BBR_NO_LUN;
1597 if (status != CFI_BBR_SUCCESS) {
1598 metatask->status = CFI_MT_ERROR;
1599 metatask->taskinfo.bbrread.status = status;
1600 mtx_unlock(&softc->lock);
1601 cfi_metatask_done(softc, metatask);
1606 * Convert the number of bytes given into blocks and check
1607 * that the number of bytes is a multiple of the blocksize.
1608 * CTL will verify that the LBA is okay.
/* Power-of-two blocksize: use mask and shift instead of % and /. */
1610 if (lun->blocksize_powerof2 != 0) {
1611 if ((metatask->taskinfo.bbrread.len &
1612 (lun->blocksize - 1)) != 0) {
1613 metatask->status = CFI_MT_ERROR;
1614 metatask->taskinfo.bbrread.status =
1616 cfi_metatask_done(softc, metatask);
1620 num_blocks = metatask->taskinfo.bbrread.len >>
1621 lun->blocksize_powerof2;
1624 * XXX KDM this could result in floating point
1625 * division, which isn't supported in the kernel on
1628 if ((metatask->taskinfo.bbrread.len %
1629 lun->blocksize) != 0) {
1630 metatask->status = CFI_MT_ERROR;
1631 metatask->taskinfo.bbrread.status =
1633 cfi_metatask_done(softc, metatask);
1638 * XXX KDM this could result in floating point
1639 * division in some cases.
1641 num_blocks = metatask->taskinfo.bbrread.len /
/* Allocate the ctl_io for the BBR read. */
1646 io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1648 metatask->status = CFI_MT_ERROR;
1649 metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
1650 mtx_unlock(&softc->lock);
1651 cfi_metatask_done(softc, metatask);
1656 * XXX KDM need to do a read capacity to get the blocksize
/* Build the read; soft error policy so recovery is attempted. */
1659 ctl_scsi_read_write(io,
1661 /*data_len*/ metatask->taskinfo.bbrread.len,
1664 /*minimum_cdb_size*/ 0,
1665 /*lba*/ metatask->taskinfo.bbrread.lba,
1666 /*num_blocks*/ num_blocks,
1667 /*tag_type*/ CTL_TAG_SIMPLE,
1672 /*metatask*/ metatask,
1673 /*policy*/ CFI_ERR_SOFT,
1675 /*orig_lun_io*/ NULL,
1676 /*done_function*/ cfi_lun_io_done);
1678 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1680 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1682 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1683 printf("%s: error returned from ctl_queue()!\n",
/* Queue failed: untrack and report the error to the metatask. */
1685 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1687 metatask->status = CFI_MT_ERROR;
1688 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1689 mtx_unlock(&softc->lock);
1690 cfi_metatask_done(softc, metatask);
1694 mtx_unlock(&softc->lock);
/* Unknown task type is a programming error. */
1698 panic("invalid metatask type %d", metatask->tasktype);
1699 break; /* NOTREACHED */
/*
 * Shut down the shelf: allocate a metatask from the softc's pool,
 * initialize it as a CFI_TASK_SHUTDOWN request carrying the caller's
 * completion callback, and hand it to cfi_action().  If the pool
 * allocation fails, the callback is invoked directly with
 * CFI_MT_ERROR.
 */
1705 cfi_shutdown_shelf(cfi_cb_t callback, void *callback_arg)
1707 struct ctl_mem_element *element;
1708 struct cfi_softc *softc;
1709 struct cfi_metatask *metatask;
1711 softc = &fetd_internal_softc;
/* can_wait == 0: fail rather than sleep if the pool is exhausted. */
1713 element = ctl_alloc_mem_element(&softc->metatask_pool, /*can_wait*/ 0);
1714 if (element == NULL) {
/* Report the allocation failure straight back to the caller. */
1715 callback(callback_arg,
1716 /*status*/ CFI_MT_ERROR,
1718 /*sluns_complete*/ 0,
1719 /*sluns_failed*/ 0);
/* The metatask lives in the pool element's payload. */
1723 metatask = (struct cfi_metatask *)element->bytes;
1725 memset(metatask, 0, sizeof(*metatask));
1726 metatask->tasktype = CFI_TASK_SHUTDOWN;
1727 metatask->status = CFI_MT_NONE;
1728 metatask->taskinfo.startstop.callback = callback;
1729 metatask->taskinfo.startstop.callback_arg = callback_arg;
/* Remember the element so the metatask can be returned to the pool. */
1730 metatask->element = element;
/*
 * NOTE(review): cfi_action() is defined above in this file with a
 * single 'metatask' parameter, yet it is called here with two
 * arguments -- confirm the current signature against the full file.
 */
1732 cfi_action(softc, metatask);
/* Remainder of the original block comment describing the sequence: */
1735 * - send a report luns to lun 0, get LUN list.
1736 * - send an inquiry to each lun
1737 * - send a stop/offline to each direct access LUN
1738 * - if we get a reservation conflict, reset the LUN and then
1739 * retry sending the stop/offline
1740 * - return status back to the caller
/*
 * Start up the shelf: mirror image of cfi_shutdown_shelf().  Allocates
 * a metatask from the softc's pool, initializes it as a
 * CFI_TASK_STARTUP request carrying the caller's completion callback,
 * and hands it to cfi_action().  If the pool allocation fails, the
 * callback is invoked directly with CFI_MT_ERROR.
 */
1745 cfi_start_shelf(cfi_cb_t callback, void *callback_arg)
1747 struct ctl_mem_element *element;
1748 struct cfi_softc *softc;
1749 struct cfi_metatask *metatask;
1751 softc = &fetd_internal_softc;
/* can_wait == 0: fail rather than sleep if the pool is exhausted. */
1753 element = ctl_alloc_mem_element(&softc->metatask_pool, /*can_wait*/ 0);
1754 if (element == NULL) {
/* Report the allocation failure straight back to the caller. */
1755 callback(callback_arg,
1756 /*status*/ CFI_MT_ERROR,
1758 /*sluns_complete*/ 0,
1759 /*sluns_failed*/ 0);
/* The metatask lives in the pool element's payload. */
1763 metatask = (struct cfi_metatask *)element->bytes;
1765 memset(metatask, 0, sizeof(*metatask));
1766 metatask->tasktype = CFI_TASK_STARTUP;
1767 metatask->status = CFI_MT_NONE;
1768 metatask->taskinfo.startstop.callback = callback;
1769 metatask->taskinfo.startstop.callback_arg = callback_arg;
/* Remember the element so the metatask can be returned to the pool. */
1770 metatask->element = element;
/*
 * NOTE(review): cfi_action() is defined above in this file with a
 * single 'metatask' parameter, yet it is called here with two
 * arguments -- confirm the current signature against the full file.
 */
1772 cfi_action(softc, metatask);
/* Remainder of the original block comment describing the sequence: */
1775 * - send a report luns to lun 0, get LUN list.
1776 * - send an inquiry to each lun
1777 * - send a stop/offline to each direct access LUN
1778 * - if we get a reservation conflict, reset the LUN and then
1779 * retry sending the stop/offline
1780 * - return status back to the caller
/*
 * Allocate and zero-initialize a metatask from the softc's pool.
 * 'can_wait' is passed through to ctl_alloc_mem_element() to control
 * whether the allocation may sleep.  On failure NULL is returned
 * (return statements fall in the missing lines of this extract --
 * confirm against the full file).
 */
1786 struct cfi_metatask *
1787 cfi_alloc_metatask(int can_wait)
1789 struct ctl_mem_element *element;
1790 struct cfi_metatask *metatask;
1791 struct cfi_softc *softc;
1793 softc = &fetd_internal_softc;
1795 element = ctl_alloc_mem_element(&softc->metatask_pool, can_wait);
1796 if (element == NULL)
/* NOTE(review): failure-return line(s) 1797-1798 not visible here. */
1799 metatask = (struct cfi_metatask *)element->bytes;
1800 memset(metatask, 0, sizeof(*metatask));
1801 metatask->status = CFI_MT_NONE;
/* Remember the element so cfi_free_metatask() can return it to the pool. */
1802 metatask->element = element;
1808 cfi_free_metatask(struct cfi_metatask *metatask)
1810 ctl_free_mem_element(metatask->element);