2 * Copyright (c) 2004, 2005 Silicon Graphics International Corp.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_internal.c#5 $
33 * CTL kernel internal frontend target driver. This allows kernel-level
34 * clients to send commands into CTL.
36 * This has elements of a FETD (e.g. it has to set tag numbers, initiator,
37 * port, target, and LUN) and elements of an initiator (LUN discovery and
38 * probing, error recovery, command initiation). Even though this has some
39 * initiator type elements, this is not intended to be a full fledged
40 * initiator layer. It is only intended to send a limited number of
41 * commands to a well known target layer.
43 * To be able to fulfill the role of a full initiator layer, it would need
44 * a whole lot more functionality.
46 * Author: Ken Merry <ken@FreeBSD.org>
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/kernel.h>
56 #include <sys/types.h>
57 #include <sys/malloc.h>
59 #include <sys/mutex.h>
60 #include <sys/condvar.h>
61 #include <sys/queue.h>
63 #include <sys/sysctl.h>
64 #include <cam/scsi/scsi_all.h>
65 #include <cam/scsi/scsi_da.h>
66 #include <cam/ctl/ctl_io.h>
67 #include <cam/ctl/ctl.h>
68 #include <cam/ctl/ctl_frontend.h>
69 #include <cam/ctl/ctl_frontend_internal.h>
70 #include <cam/ctl/ctl_backend.h>
71 #include <cam/ctl/ctl_ioctl.h>
72 #include <cam/ctl/ctl_util.h>
73 #include <cam/ctl/ctl_ha.h>
74 #include <cam/ctl/ctl_private.h>
75 #include <cam/ctl/ctl_mem_pool.h>
76 #include <cam/ctl/ctl_debug.h>
77 #include <cam/ctl/ctl_scsi_all.h>
78 #include <cam/ctl/ctl_error.h>
82 * - overall metatask, different potential metatask types (e.g. forced
83 * shutdown, gentle shutdown)
84 * - forced shutdown metatask:
85 * - states: report luns, pending, done?
86 * - list of luns pending, with the relevant I/O for that lun attached.
87 * This would allow moving ahead on LUNs with no errors, and going
88 * into error recovery on LUNs with problems. Per-LUN states might
89 * include inquiry, stop/offline, done.
91 * Use LUN enable for LUN list instead of getting it manually? We'd still
92 * need inquiry data for each LUN.
94 * How to handle processor LUN w.r.t. found/stopped counts?
/*
 * Internal data structures for the CTL kernel-internal ("CFI") frontend.
 *
 * NOTE(review): this chunk is an incomplete extraction -- struct/enum
 * headers, closing braces, and several members are missing.  Surviving
 * lines are kept verbatim (including leaked line-number prefixes).
 */
/* Per-metatask bookkeeping for LUN start/stop requests. */
103 struct cfi_task_startstop {
109 /* XXX KDM add more fields here */
113 struct cfi_task_startstop startstop;
/*
 * One metatask: a multi-LUN operation (startup/shutdown/BBR read) handed
 * in via cfi_action() and completed through cfi_metatask_done().
 */
116 struct cfi_metatask {
117 cfi_tasktype tasktype;
118 cfi_mt_status status;
119 union cfi_taskinfo taskinfo;
120 struct ctl_mem_element *element;
122 STAILQ_ENTRY(cfi_metatask) links;
/*
 * Error-recovery action flags.  The low byte selects the action
 * (CFI_ERR_MASK is used in cfi_done()); CFI_ERR_NO_DECREMENT keeps the
 * retry counter untouched for "expected" errors such as unit attentions.
 */
127 CFI_ERR_RETRY = 0x000,
128 CFI_ERR_FAIL = 0x001,
129 CFI_ERR_LUN_RESET = 0x002,
130 CFI_ERR_MASK = 0x0ff,
131 CFI_ERR_NO_DECREMENT = 0x100
/* Per-LUN probe states, driven by cfi_lun_probe()/cfi_lun_probe_done(). */
141 CFI_LUN_READCAPACITY,
142 CFI_LUN_READCAPACITY_16,
/* Per-LUN state tracked by this frontend. */
147 struct ctl_id target_id;
149 struct scsi_inquiry_data inq_data;
152 int blocksize_powerof2;
153 uint32_t cur_tag_num;
155 struct ctl_mem_element *element;
156 struct cfi_softc *softc;
157 STAILQ_HEAD(, cfi_lun_io) io_list;
158 STAILQ_ENTRY(cfi_lun) links;
/*
 * Per-I/O state; stored in io_hdr.port_priv (see cfi_init_io()), so its
 * size is checked against CTL_PORT_PRIV_SIZE at init time.
 */
163 struct cfi_metatask *metatask;
164 cfi_error_policy policy;
165 void (*done_function)(union ctl_io *io);
166 union ctl_io *ctl_io;
167 struct cfi_lun_io *orig_lun_io;
168 STAILQ_ENTRY(cfi_lun_io) links;
/* Global softc for this (singleton) frontend instance. */
177 struct ctl_frontend fe;
181 STAILQ_HEAD(, cfi_lun) lun_list;
182 STAILQ_HEAD(, cfi_metatask) metatask_list;
183 struct ctl_mem_pool lun_pool;
184 struct ctl_mem_pool metatask_pool;
/* Malloc type used for S/G lists and read-capacity buffers below. */
187 MALLOC_DEFINE(M_CTL_CFI, "ctlcfi", "CTL CFI");
/* Singleton instance; cfi_init()/cfi_shutdown() operate on this. */
189 static struct cfi_softc fetd_internal_softc;
/* Global kill switch checked in cfi_init(); defined in ctl.c. */
190 extern int ctl_disable;
/* Forward declarations for the frontend callbacks registered in cfi_init(). */
193 void cfi_shutdown(void) __unused;
194 static void cfi_online(void *arg);
195 static void cfi_offline(void *arg);
196 static int cfi_targ_enable(void *arg, struct ctl_id targ_id);
197 static int cfi_targ_disable(void *arg, struct ctl_id targ_id);
198 static int cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id);
199 static int cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id);
200 static void cfi_datamove(union ctl_io *io);
201 static cfi_error_action cfi_checkcond_parse(union ctl_io *io,
202 struct cfi_lun_io *lun_io);
203 static cfi_error_action cfi_error_parse(union ctl_io *io,
204 struct cfi_lun_io *lun_io);
205 static void cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
206 struct cfi_metatask *metatask, cfi_error_policy policy,
207 int retries, struct cfi_lun_io *orig_lun_io,
208 void (*done_function)(union ctl_io *io));
209 static void cfi_done(union ctl_io *io);
210 static void cfi_lun_probe_done(union ctl_io *io);
211 static void cfi_lun_probe(struct cfi_lun *lun, int have_lock);
212 static void cfi_metatask_done(struct cfi_softc *softc,
213 struct cfi_metatask *metatask);
214 static void cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask,
216 static void cfi_metatask_io_done(union ctl_io *io);
217 static void cfi_err_recovery_done(union ctl_io *io);
218 static void cfi_lun_io_done(union ctl_io *io);
/* Run cfi_init() automatically at boot, during SI_SUB_CONFIGURE. */
220 SYSINIT(cfi_init, SI_SUB_CONFIGURE, SI_ORDER_FOURTH, cfi_init, NULL);
225 struct cfi_softc *softc;
226 struct ctl_frontend *fe;
229 softc = &fetd_internal_softc;
235 /* If we're disabled, don't initialize */
236 if (ctl_disable != 0)
239 if (sizeof(struct cfi_lun_io) > CTL_PORT_PRIV_SIZE) {
240 printf("%s: size of struct cfi_lun_io %zd > "
241 "CTL_PORT_PRIV_SIZE %d\n", __func__,
242 sizeof(struct cfi_lun_io),
245 memset(softc, 0, sizeof(softc));
247 mtx_init(&softc->lock, "CTL frontend mutex", NULL, MTX_DEF);
248 softc->flags |= CTL_FLAG_MASTER_SHELF;
250 STAILQ_INIT(&softc->lun_list);
251 STAILQ_INIT(&softc->metatask_list);
252 sprintf(softc->fe_name, "CTL internal");
253 fe->port_type = CTL_PORT_INTERNAL;
254 fe->num_requested_ctl_io = 100;
255 fe->port_name = softc->fe_name;
256 fe->port_online = cfi_online;
257 fe->port_offline = cfi_offline;
258 fe->onoff_arg = softc;
259 fe->targ_enable = cfi_targ_enable;
260 fe->targ_disable = cfi_targ_disable;
261 fe->lun_enable = cfi_lun_enable;
262 fe->lun_disable = cfi_lun_disable;
263 fe->targ_lun_arg = softc;
264 fe->fe_datamove = cfi_datamove;
265 fe->fe_done = cfi_done;
266 fe->max_targets = 15;
267 fe->max_target_id = 15;
269 if (ctl_frontend_register(fe, (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0)
271 printf("%s: internal frontend registration failed\n", __func__);
276 if (ctl_init_mem_pool(&softc->lun_pool,
277 sizeof(struct cfi_lun),
278 CTL_MEM_POOL_PERM_GROW, /*grow_inc*/ 3,
279 /* initial_pool_size */ CTL_MAX_LUNS) != 0) {
280 printf("%s: can't initialize LUN memory pool\n", __func__);
285 if (ctl_init_mem_pool(&softc->metatask_pool,
286 sizeof(struct cfi_metatask),
287 CTL_MEM_POOL_PERM_GROW, /*grow_inc*/ 3,
288 /*initial_pool_size*/ 10) != 0) {
289 printf("%s: can't initialize metatask memory pool\n", __func__);
301 ctl_shrink_mem_pool(&softc->metatask_pool);
304 ctl_shrink_mem_pool(&softc->lun_pool);
307 ctl_frontend_deregister(fe);
317 struct cfi_softc *softc;
319 softc = &fetd_internal_softc;
322 * XXX KDM need to clear out any I/O pending on each LUN.
324 if (ctl_frontend_deregister(&softc->fe) != 0)
325 printf("%s: ctl_frontend_deregister() failed\n", __func__);
327 if (ctl_shrink_mem_pool(&softc->lun_pool) != 0)
328 printf("%s: error shrinking LUN pool\n", __func__);
330 if (ctl_shrink_mem_pool(&softc->metatask_pool) != 0)
331 printf("%s: error shrinking LUN pool\n", __func__);
335 cfi_online(void *arg)
337 struct cfi_softc *softc;
340 softc = (struct cfi_softc *)arg;
342 softc->flags |= CFI_ONLINE;
345 * Go through and kick off the probe for each lun. Should we check
346 * the LUN flags here to determine whether or not to probe it?
348 mtx_lock(&softc->lock);
349 STAILQ_FOREACH(lun, &softc->lun_list, links)
350 cfi_lun_probe(lun, /*have_lock*/ 1);
351 mtx_unlock(&softc->lock);
355 cfi_offline(void *arg)
357 struct cfi_softc *softc;
359 softc = (struct cfi_softc *)arg;
361 softc->flags &= ~CFI_ONLINE;
365 cfi_targ_enable(void *arg, struct ctl_id targ_id)
371 cfi_targ_disable(void *arg, struct ctl_id targ_id)
/*
 * LUN-enable callback: CTL tells us a new LUN exists.  Allocate a
 * cfi_lun, link it onto the softc list, and start the INQUIRY probe.
 * NOTE(review): interior lines (declarations, braces, returns) are
 * missing from this extraction; surviving lines kept verbatim.
 */
377 cfi_lun_enable(void *arg, struct ctl_id target_id, int lun_id)
379 struct ctl_mem_element *element;
380 struct cfi_softc *softc;
384 softc = (struct cfi_softc *)arg;
/* Scan for a pre-existing entry for this target/LUN pair. */
387 mtx_lock(&softc->lock);
388 STAILQ_FOREACH(lun, &softc->lun_list, links) {
389 if ((lun->target_id.id == target_id.id)
390 && (lun->lun_id == lun_id)) {
395 mtx_unlock(&softc->lock);
398 * If we already have this target/LUN, there is no reason to add
399 * it to our lists again.
/* can_wait == 0: called from CTL's context, so do not sleep. */
404 element = ctl_alloc_mem_element(&softc->lun_pool, /*can_wait*/ 0);
406 if (element == NULL) {
407 printf("%s: unable to allocate LUN structure\n", __func__);
411 lun = (struct cfi_lun *)element->bytes;
413 lun->element = element;
414 lun->target_id = target_id;
415 lun->lun_id = lun_id;
416 lun->cur_tag_num = 0;
/* New LUNs always start in the INQUIRY probe state. */
417 lun->state = CFI_LUN_INQUIRY;
419 STAILQ_INIT(&lun->io_list);
421 mtx_lock(&softc->lock);
422 STAILQ_INSERT_TAIL(&softc->lun_list, lun, links);
423 mtx_unlock(&softc->lock);
425 cfi_lun_probe(lun, /*have_lock*/ 0);
/*
 * LUN-disable callback: CTL tells us a LUN is going away.  Find it on
 * the list, unlink it, and free its memory-pool element.
 * NOTE(review): some interior lines are missing from this extraction;
 * surviving lines kept verbatim.
 */
431 cfi_lun_disable(void *arg, struct ctl_id target_id, int lun_id)
433 struct cfi_softc *softc;
437 softc = (struct cfi_softc *)arg;
442 * XXX KDM need to do an invalidate and then a free when any
443 * pending I/O has completed. Or do we? CTL won't free a LUN
444 * while any I/O is pending. So we won't get this notification
445 * unless any I/O we have pending on a LUN has completed.
447 mtx_lock(&softc->lock);
448 STAILQ_FOREACH(lun, &softc->lun_list, links) {
449 if ((lun->target_id.id == target_id.id)
450 && (lun->lun_id == lun_id)) {
456 STAILQ_REMOVE(&softc->lun_list, lun, cfi_lun, links);
458 mtx_unlock(&softc->lock);
/* Not found: log it; uintmax_t cast matches the %ju format. */
461 printf("%s: can't find target %ju lun %d\n", __func__,
462 (uintmax_t)target_id.id, lun_id);
466 ctl_free_mem_element(lun->element);
/*
 * Datamove callback: copy data between the caller's "external" buffer
 * (single buffer or S/G list) and CTL's kernel S/G list, honoring the
 * partial-transfer offset in ext_data_filled.  Calls be_move_done() when
 * finished.
 * NOTE(review): interior lines (braces, loop bookkeeping, bailout
 * labels) are missing from this extraction; surviving lines verbatim.
 */
472 * XXX KDM run this inside a thread, or inside the caller's context?
475 cfi_datamove(union ctl_io *io)
477 struct ctl_sg_entry *ext_sglist, *kern_sglist;
478 struct ctl_sg_entry ext_entry, kern_entry;
479 int ext_sglen, ext_sg_entries, kern_sg_entries;
480 int ext_sg_start, ext_offset;
481 int len_to_copy, len_copied;
482 int kern_watermark, ext_watermark;
483 int ext_sglist_malloced;
484 struct ctl_scsiio *ctsio;
487 ext_sglist_malloced = 0;
492 CTL_DEBUG_PRINT(("%s\n", __func__));
497 * If this is the case, we're probably doing a BBR read and don't
498 * actually need to transfer the data. This will effectively
499 * bit-bucket the data.
501 if (ctsio->ext_data_ptr == NULL)
505 * To simplify things here, if we have a single buffer, stick it in
506 * a S/G entry and just make it a single entry S/G list.
508 if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
511 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
514 * XXX KDM GFP_KERNEL, don't know what the caller's context
515 * is. Need to figure that out.
517 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL_CFI,
519 if (ext_sglist == NULL) {
520 ctl_set_internal_failure(ctsio,
525 ext_sglist_malloced = 1;
/*
 * NOTE(review): memcpy() returns its destination pointer (never 0), so
 * "!= 0" is always true as written -- this check looks copied from a
 * copyin()-style variant and should be removed or replaced.  Confirm
 * against the original file.
 */
526 if (memcpy(ext_sglist, ctsio->ext_data_ptr, ext_sglen) != 0) {
527 ctl_set_internal_failure(ctsio,
532 ext_sg_entries = ctsio->ext_sg_entries;
/* Locate the S/G entry and offset where the previous transfer stopped. */
534 for (i = 0; i < ext_sg_entries; i++) {
535 if ((len_seen + ext_sglist[i].len) >=
536 ctsio->ext_data_filled) {
538 ext_offset = ctsio->ext_data_filled - len_seen;
541 len_seen += ext_sglist[i].len;
/* Single flat buffer: wrap it in a one-entry S/G list on the stack. */
544 ext_sglist = &ext_entry;
545 ext_sglist->addr = ctsio->ext_data_ptr;
546 ext_sglist->len = ctsio->ext_data_len;
549 ext_offset = ctsio->ext_data_filled;
552 if (ctsio->kern_sg_entries > 0) {
553 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
554 kern_sg_entries = ctsio->kern_sg_entries;
556 kern_sglist = &kern_entry;
557 kern_sglist->addr = ctsio->kern_data_ptr;
558 kern_sglist->len = ctsio->kern_data_len;
564 ext_watermark = ext_offset;
/* Walk both lists in lockstep, copying min(remaining) each pass. */
566 for (i = ext_sg_start, j = 0;
567 i < ext_sg_entries && j < kern_sg_entries;) {
568 uint8_t *ext_ptr, *kern_ptr;
570 len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
571 kern_sglist[j].len - kern_watermark);
573 ext_ptr = (uint8_t *)ext_sglist[i].addr;
574 ext_ptr = ext_ptr + ext_watermark;
575 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
/* Bus-address S/G entries are not supported by this frontend. */
579 panic("need to implement bus address support");
581 kern_ptr = bus_to_virt(kern_sglist[j].addr);
584 kern_ptr = (uint8_t *)kern_sglist[j].addr;
585 kern_ptr = kern_ptr + kern_watermark;
587 kern_watermark += len_to_copy;
588 ext_watermark += len_to_copy;
/* Direction of copy depends on whether this is a read or a write. */
590 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
592 CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n",
593 __func__, len_to_copy));
594 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
596 memcpy(ext_ptr, kern_ptr, len_to_copy);
598 CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n",
599 __func__, len_to_copy));
600 CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__,
602 memcpy(kern_ptr, ext_ptr, len_to_copy);
605 len_copied += len_to_copy;
/* Advance to the next entry on whichever side is exhausted. */
607 if (ext_sglist[i].len == ext_watermark) {
612 if (kern_sglist[j].len == kern_watermark) {
618 ctsio->ext_data_filled += len_copied;
620 CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n",
621 __func__, ext_sg_entries, kern_sg_entries));
622 CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n",
623 __func__, ctsio->ext_data_len, ctsio->kern_data_len));
626 /* XXX KDM set residual?? */
629 if (ext_sglist_malloced != 0)
630 free(ext_sglist, M_CTL_CFI)
632 io->scsiio.be_move_done(io);
/*
 * Decide what to do about a CHECK CONDITION: default to retry, fail
 * outright for unrecoverable hardware/not-ready ASC/ASCQ pairs, and
 * avoid decrementing the retry counter for deferred errors and unit
 * attentions.
 * NOTE(review): switch braces/breaks missing from this extraction;
 * surviving lines kept verbatim.
 */
638 * For any sort of check condition, busy, etc., we just retry. We do not
639 * decrement the retry count for unit attention type errors. These are
640 * normal, and we want to save the retry count for "real" errors. Otherwise,
641 * we could end up with situations where a command will succeed in some
642 * situations and fail in others, depending on whether a unit attention is
643 * pending. Also, some of our error recovery actions, most notably the
644 * LUN reset action, will cause a unit attention.
646 * We can add more detail here later if necessary.
648 static cfi_error_action
649 cfi_checkcond_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
651 cfi_error_action error_action;
652 int error_code, sense_key, asc, ascq;
655 * Default to retrying the command.
657 error_action = CFI_ERR_RETRY;
/* Pull error code / sense key / ASC / ASCQ out of the sense data. */
659 scsi_extract_sense_len(&io->scsiio.sense_data,
660 io->scsiio.sense_len,
667 switch (error_code) {
668 case SSD_DEFERRED_ERROR:
669 case SSD_DESC_DEFERRED_ERROR:
670 error_action |= CFI_ERR_NO_DECREMENT;
672 case SSD_CURRENT_ERROR:
673 case SSD_DESC_CURRENT_ERROR:
676 case SSD_KEY_UNIT_ATTENTION:
677 error_action |= CFI_ERR_NO_DECREMENT;
679 case SSD_KEY_HARDWARE_ERROR:
681 * This is our generic "something bad happened"
682 * error code. It often isn't recoverable.
684 if ((asc == 0x44) && (ascq == 0x00))
685 error_action = CFI_ERR_FAIL;
687 case SSD_KEY_NOT_READY:
689 * If the LUN is powered down, there likely isn't
690 * much point in retrying right now.
692 if ((asc == 0x04) && (ascq == 0x02))
693 error_action = CFI_ERR_FAIL;
695 * If the LUN is offline, there probably isn't much
696 * point in retrying, either.
698 if ((asc == 0x04) && (ascq == 0x03))
699 error_action = CFI_ERR_FAIL;
705 return (error_action);
/*
 * Map a failed I/O to an error-recovery action: reservation conflicts
 * under the HARD policy trigger a LUN reset, CHECK CONDITIONs go through
 * cfi_checkcond_parse(), everything else retries.  An unknown io_type
 * panics.
 * NOTE(review): several case labels/breaks missing from this extraction;
 * surviving lines kept verbatim.
 */
708 static cfi_error_action
709 cfi_error_parse(union ctl_io *io, struct cfi_lun_io *lun_io)
711 cfi_error_action error_action;
713 error_action = CFI_ERR_RETRY;
715 switch (io->io_hdr.io_type) {
717 switch (io->io_hdr.status & CTL_STATUS_MASK) {
719 switch (io->scsiio.scsi_status) {
720 case SCSI_STATUS_RESERV_CONFLICT:
722 * For a reservation conflict, we'll usually
723 * want the hard error recovery policy, so
724 * we'll reset the LUN.
726 if (lun_io->policy == CFI_ERR_HARD)
733 case SCSI_STATUS_CHECK_COND:
735 error_action = cfi_checkcond_parse(io, lun_io);
740 error_action = CFI_ERR_RETRY;
746 * In theory task management commands shouldn't fail...
748 error_action = CFI_ERR_RETRY;
751 printf("%s: invalid ctl_io type %d\n", __func__,
753 panic("%s: invalid ctl_io type %d\n", __func__,
758 return (error_action);
/*
 * Fill in the common fields of a ctl_io we are about to submit: the
 * nexus (fixed initiator id 7, our port, the LUN's target/LUN ids),
 * retry budget, and the cfi_lun_io record kept in the port-private area.
 * NOTE(review): some interior lines missing from this extraction;
 * surviving lines kept verbatim.
 */
762 cfi_init_io(union ctl_io *io, struct cfi_lun *lun,
763 struct cfi_metatask *metatask, cfi_error_policy policy, int retries,
764 struct cfi_lun_io *orig_lun_io,
765 void (*done_function)(union ctl_io *io))
767 struct cfi_lun_io *lun_io;
769 io->io_hdr.nexus.initid.id = 7;
770 io->io_hdr.nexus.targ_port = lun->softc->fe.targ_port;
771 io->io_hdr.nexus.targ_target.id = lun->target_id.id;
772 io->io_hdr.nexus.targ_lun = lun->lun_id;
773 io->io_hdr.retries = retries;
/* Per-I/O state lives in the port-private area of the io_hdr. */
774 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
775 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = lun_io;
777 lun_io->metatask = metatask;
779 lun_io->policy = policy;
780 lun_io->orig_lun_io = orig_lun_io;
781 lun_io->done_function = done_function;
783 * We only set the tag number for SCSI I/Os. For task management
784 * commands, the tag number is only really needed for aborts, so
785 * the caller can set it if necessary.
787 switch (io->io_hdr.io_type) {
789 io->scsiio.tag_num = lun->cur_tag_num++;
/*
 * fe_done callback: minimal retry engine.  On error with retries left,
 * consult cfi_error_parse(); either fail, issue a LUN-reset recovery
 * I/O, or requeue the original command.  Otherwise hand off to the
 * I/O's done_function.
 * NOTE(review): interior lines (braces, case labels, gotos) missing
 * from this extraction; surviving lines kept verbatim.
 */
798 cfi_done(union ctl_io *io)
800 struct cfi_lun_io *lun_io;
801 struct cfi_softc *softc;
804 lun_io = (struct cfi_lun_io *)
805 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
811 * Very minimal retry logic. We basically retry if we got an error
812 * back, and the retry count is greater than 0. If we ever want
813 * more sophisticated initiator type behavior, the CAM error
814 * recovery code in ../common might be helpful.
816 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
817 && (io->io_hdr.retries > 0)) {
818 ctl_io_status old_status;
819 cfi_error_action error_action;
821 error_action = cfi_error_parse(io, lun_io);
/* Low byte selects the action; high bits only modify retry counting. */
823 switch (error_action & CFI_ERR_MASK) {
826 break; /* NOTREACHED */
827 case CFI_ERR_LUN_RESET: {
828 union ctl_io *new_io;
829 struct cfi_lun_io *new_lun_io;
831 new_io = ctl_alloc_io(softc->fe.ctl_pool_ref);
832 if (new_io == NULL) {
833 printf("%s: unable to allocate ctl_io for "
834 "error recovery\n", __func__);
/* Recovery I/O: a LUN reset whose completion re-drives this command. */
839 new_io->io_hdr.io_type = CTL_IO_TASK;
840 new_io->taskio.task_action = CTL_TASK_LUN_RESET;
845 /*policy*/ CFI_ERR_SOFT,
847 /*orig_lun_io*/lun_io,
848 /*done_function*/ cfi_err_recovery_done);
851 new_lun_io = (struct cfi_lun_io *)
852 new_io->io_hdr.port_priv;
854 mtx_lock(&lun->softc->lock);
855 STAILQ_INSERT_TAIL(&lun->io_list, new_lun_io, links);
856 mtx_unlock(&lun->softc->lock);
/* Unit attentions etc. do not consume a retry (see cfi_checkcond_parse). */
863 if ((error_action & CFI_ERR_NO_DECREMENT) == 0)
864 io->io_hdr.retries--;
/* Reset status/flags so the I/O can be resubmitted cleanly. */
868 old_status = io->io_hdr.status;
869 io->io_hdr.status = CTL_STATUS_NONE;
871 io->io_hdr.flags &= ~CTL_FLAG_ALREADY_DONE;
873 io->io_hdr.flags &= ~CTL_FLAG_ABORT;
874 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
876 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
877 printf("%s: error returned from ctl_queue()!\n",
879 io->io_hdr.status = old_status;
884 lun_io->done_function(io);
/*
 * Completion handler for probe I/Os.  Drives the LUN state machine:
 * INQUIRY -> (non-disk: READY) or READ CAPACITY -> possibly
 * READ CAPACITY(16) -> READY, recording num_blocks/blocksize and a
 * power-of-2 shift for the blocksize.
 * NOTE(review): interior lines (braces, declarations, breaks) missing
 * from this extraction; surviving lines kept verbatim.
 */
888 cfi_lun_probe_done(union ctl_io *io)
891 struct cfi_lun_io *lun_io;
893 lun_io = (struct cfi_lun_io *)
894 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
897 switch (lun->state) {
898 case CFI_LUN_INQUIRY: {
899 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
900 /* print out something here?? */
901 printf("%s: LUN %d probe failed because inquiry "
902 "failed\n", __func__, lun->lun_id);
903 ctl_io_error_print(io, NULL);
/* Non-disk devices skip the capacity probe and go straight to READY. */
906 if (SID_TYPE(&lun->inq_data) != T_DIRECT) {
909 lun->state = CFI_LUN_READY;
910 ctl_scsi_path_string(io, path_str,
912 printf("%s", path_str);
913 scsi_print_inquiry(&lun->inq_data);
915 lun->state = CFI_LUN_READCAPACITY;
916 cfi_lun_probe(lun, /*have_lock*/ 0);
919 mtx_lock(&lun->softc->lock);
920 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
921 mtx_unlock(&lun->softc->lock);
925 case CFI_LUN_READCAPACITY:
926 case CFI_LUN_READCAPACITY_16: {
933 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
934 printf("%s: LUN %d probe failed because READ CAPACITY "
935 "failed\n", __func__, lun->lun_id);
936 ctl_io_error_print(io, NULL);
939 if (lun->state == CFI_LUN_READCAPACITY) {
940 struct scsi_read_capacity_data *rdcap;
942 rdcap = (struct scsi_read_capacity_data *)
943 io->scsiio.ext_data_ptr;
945 maxlba = scsi_4btoul(rdcap->addr);
946 blocksize = scsi_4btoul(rdcap->length);
947 if (blocksize == 0) {
948 printf("%s: LUN %d has invalid "
949 "blocksize 0, probe aborted\n",
950 __func__, lun->lun_id);
/* 0xffffffff means the capacity needs READ CAPACITY(16). */
951 } else if (maxlba == 0xffffffff) {
952 lun->state = CFI_LUN_READCAPACITY_16;
953 cfi_lun_probe(lun, /*have_lock*/ 0);
955 lun->state = CFI_LUN_READY;
957 struct scsi_read_capacity_data_long *rdcap_long;
960 scsi_read_capacity_data_long *)
961 io->scsiio.ext_data_ptr;
962 maxlba = scsi_8btou64(rdcap_long->addr);
963 blocksize = scsi_4btoul(rdcap_long->length);
965 if (blocksize == 0) {
966 printf("%s: LUN %d has invalid "
967 "blocksize 0, probe aborted\n",
968 __func__, lun->lun_id);
970 lun->state = CFI_LUN_READY;
974 if (lun->state == CFI_LUN_READY) {
977 lun->num_blocks = maxlba + 1;
978 lun->blocksize = blocksize;
981 * If this is true, the blocksize is a power of 2.
982 * We already checked for 0 above.
984 if (((blocksize - 1) & blocksize) == 0) {
/* Find the set bit's position: the log2 of the blocksize. */
987 for (i = 0; i < 32; i++) {
988 if ((blocksize & (1 << i)) != 0) {
989 lun->blocksize_powerof2 = i;
994 ctl_scsi_path_string(io, path_str,sizeof(path_str));
995 printf("%s", path_str);
996 scsi_print_inquiry(&lun->inq_data);
997 printf("%s %ju blocks, blocksize %d\n", path_str,
998 (uintmax_t)maxlba + 1, blocksize);
/* Done with this probe I/O: unlink it and free its data buffer. */
1000 mtx_lock(&lun->softc->lock);
1001 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1002 mtx_unlock(&lun->softc->lock);
1003 free(io->scsiio.ext_data_ptr, M_CTL_CFI);
1009 mtx_lock(&lun->softc->lock);
1010 /* How did we get here?? */
1011 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1012 mtx_unlock(&lun->softc->lock);
/*
 * Issue the next probe command for a LUN based on its state: INQUIRY
 * into lun->inq_data, or READ CAPACITY / READ CAPACITY(16) into a
 * freshly malloc'ed buffer.  have_lock tells us whether the caller
 * already holds softc->lock.  No-op if the port is offline.
 * NOTE(review): interior lines (braces, have_lock checks, returns)
 * missing from this extraction; surviving lines kept verbatim.
 */
1019 cfi_lun_probe(struct cfi_lun *lun, int have_lock)
1023 mtx_lock(&lun->softc->lock);
1024 if ((lun->softc->flags & CFI_ONLINE) == 0) {
1026 mtx_unlock(&lun->softc->lock);
1030 mtx_unlock(&lun->softc->lock);
1032 switch (lun->state) {
1033 case CFI_LUN_INQUIRY: {
1034 struct cfi_lun_io *lun_io;
1037 io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1039 printf("%s: unable to alloc ctl_io for target %ju "
1040 "lun %d probe\n", __func__,
1041 (uintmax_t)lun->target_id.id, lun->lun_id);
/* INQUIRY data lands directly in the LUN structure. */
1044 ctl_scsi_inquiry(io,
1045 /*data_ptr*/(uint8_t *)&lun->inq_data,
1046 /*data_len*/ sizeof(lun->inq_data),
1049 /*tag_type*/ CTL_TAG_SIMPLE,
1055 /*policy*/ CFI_ERR_SOFT,
1057 /*orig_lun_io*/ NULL,
1059 cfi_lun_probe_done);
1061 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1064 mtx_lock(&lun->softc->lock);
1065 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1067 mtx_unlock(&lun->softc->lock);
1069 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1070 printf("%s: error returned from ctl_queue()!\n",
1072 STAILQ_REMOVE(&lun->io_list, lun_io,
1078 case CFI_LUN_READCAPACITY:
1079 case CFI_LUN_READCAPACITY_16: {
1080 struct cfi_lun_io *lun_io;
1084 io = ctl_alloc_io(lun->softc->fe.ctl_pool_ref);
1086 printf("%s: unable to alloc ctl_io for target %ju "
1087 "lun %d probe\n", __func__,
1088 (uintmax_t)lun->target_id.id, lun->lun_id);
/* Long-form buffer is large enough for both capacity commands. */
1092 dataptr = malloc(sizeof(struct scsi_read_capacity_data_long),
1093 M_CTL_CFI, M_NOWAIT);
1094 if (dataptr == NULL) {
1095 printf("%s: unable to allocate SCSI read capacity "
1096 "buffer for target %ju lun %d\n", __func__,
1097 (uintmax_t)lun->target_id.id, lun->lun_id);
1100 if (lun->state == CFI_LUN_READCAPACITY) {
1101 ctl_scsi_read_capacity(io,
1102 /*data_ptr*/ dataptr,
1104 sizeof(struct scsi_read_capacity_data_long),
1108 /*tag_type*/ CTL_TAG_SIMPLE,
1111 ctl_scsi_read_capacity_16(io,
1112 /*data_ptr*/ dataptr,
1114 sizeof(struct scsi_read_capacity_data_long),
1118 /*tag_type*/ CTL_TAG_SIMPLE,
1124 /*policy*/ CFI_ERR_SOFT,
1126 /*orig_lun_io*/ NULL,
1127 /*done_function*/ cfi_lun_probe_done);
1129 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1132 mtx_lock(&lun->softc->lock);
1133 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1135 mtx_unlock(&lun->softc->lock);
1137 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1138 printf("%s: error returned from ctl_queue()!\n",
1140 STAILQ_REMOVE(&lun->io_list, lun_io,
1142 free(dataptr, M_CTL_CFI);
1149 /* Why were we called? */
1155 cfi_metatask_done(struct cfi_softc *softc, struct cfi_metatask *metatask)
1157 mtx_lock(&softc->lock);
1158 STAILQ_REMOVE(&softc->metatask_list, metatask, cfi_metatask, links);
1159 mtx_unlock(&softc->lock);
1162 * Return status to the caller. Caller allocated storage, and is
1163 * responsible for calling cfi_free_metatask to release it once
1164 * they've seen the status.
1166 metatask->callback(metatask->callback_arg, metatask);
/*
 * Translate the completion status of a BBR-read I/O into the metatask's
 * status enumeration (success, reservation conflict, LUN stopped/offline,
 * generic SCSI error), saving the raw SCSI status and sense data for the
 * caller.  No-op for non-BBRREAD metatasks.
 * NOTE(review): interior lines (braces, returns, some enum constants)
 * missing from this extraction; surviving lines kept verbatim.
 */
1170 cfi_metatask_bbr_errorparse(struct cfi_metatask *metatask, union ctl_io *io)
1172 int error_code, sense_key, asc, ascq;
1174 if (metatask->tasktype != CFI_TASK_BBRREAD)
1177 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
1178 metatask->status = CFI_MT_SUCCESS;
1179 metatask->taskinfo.bbrread.status = CFI_BBR_SUCCESS;
1183 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) {
1184 metatask->status = CFI_MT_ERROR;
1185 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
/* Preserve raw SCSI status + sense for the caller, clamped to fit. */
1189 metatask->taskinfo.bbrread.scsi_status = io->scsiio.scsi_status;
1190 memcpy(&metatask->taskinfo.bbrread.sense_data, &io->scsiio.sense_data,
1191 ctl_min(sizeof(metatask->taskinfo.bbrread.sense_data),
1192 sizeof(io->scsiio.sense_data)));
1194 if (io->scsiio.scsi_status == SCSI_STATUS_RESERV_CONFLICT) {
1195 metatask->status = CFI_MT_ERROR;
1196 metatask->taskinfo.bbrread.status = CFI_BBR_RESERV_CONFLICT;
1200 if (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) {
1201 metatask->status = CFI_MT_ERROR;
1202 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1206 scsi_extract_sense_len(&io->scsiio.sense_data,
1207 io->scsiio.sense_len,
1214 switch (error_code) {
1215 case SSD_DEFERRED_ERROR:
1216 case SSD_DESC_DEFERRED_ERROR:
1217 metatask->status = CFI_MT_ERROR;
1218 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
1220 case SSD_CURRENT_ERROR:
1221 case SSD_DESC_CURRENT_ERROR:
1223 struct scsi_sense_data *sense;
1225 sense = &io->scsiio.sense_data;
/* ASC/ASCQ 04/02 = not ready (stopped); 04/03 = manual intervention. */
1227 if ((asc == 0x04) && (ascq == 0x02)) {
1228 metatask->status = CFI_MT_ERROR;
1229 metatask->taskinfo.bbrread.status = CFI_BBR_LUN_STOPPED;
1230 } else if ((asc == 0x04) && (ascq == 0x03)) {
1231 metatask->status = CFI_MT_ERROR;
1232 metatask->taskinfo.bbrread.status =
1233 CFI_BBR_LUN_OFFLINE_CTL;
1234 } else if ((asc == 0x44) && (ascq == 0x00)) {
/* Vendor-specific: decode RAIDCore status in the SKS field. */
1236 if (sense->sense_key_spec[0] & SSD_SCS_VALID) {
1237 uint16_t retry_count;
1239 retry_count = sense->sense_key_spec[1] << 8 |
1240 sense->sense_key_spec[2];
1241 if (((retry_count & 0xf000) == CSC_RAIDCORE)
1242 && ((retry_count & 0x0f00) == CSC_SHELF_SW)
1243 && ((retry_count & 0xff) ==
1244 RC_STS_DEVICE_OFFLINE)) {
1245 metatask->status = CFI_MT_ERROR;
1246 metatask->taskinfo.bbrread.status =
1247 CFI_BBR_LUN_OFFLINE_RC;
1249 metatask->status = CFI_MT_ERROR;
1250 metatask->taskinfo.bbrread.status =
1254 #endif /* NEEDTOPORT */
1255 metatask->status = CFI_MT_ERROR;
1256 metatask->taskinfo.bbrread.status =
1262 metatask->status = CFI_MT_ERROR;
1263 metatask->taskinfo.bbrread.status = CFI_BBR_SCSI_ERROR;
/*
 * Completion handler for I/Os belonging to a metatask.  For start/stop
 * metatasks, tallies per-LUN completions/failures and finishes the
 * metatask when every LUN has reported; for BBR reads, translates the
 * error and finishes immediately.
 * NOTE(review): interior lines (braces, lun/softc assignments, frees)
 * missing from this extraction; surviving lines kept verbatim.
 */
1271 cfi_metatask_io_done(union ctl_io *io)
1273 struct cfi_lun_io *lun_io;
1274 struct cfi_metatask *metatask;
1275 struct cfi_softc *softc;
1276 struct cfi_lun *lun;
1278 lun_io = (struct cfi_lun_io *)
1279 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1284 metatask = lun_io->metatask;
1286 switch (metatask->tasktype) {
1287 case CFI_TASK_STARTUP:
1288 case CFI_TASK_SHUTDOWN: {
1289 int failed, done, is_start;
1293 if (metatask->tasktype == CFI_TASK_STARTUP)
/* Counters are shared across LUN completions: update under the lock. */
1298 mtx_lock(&softc->lock);
1299 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
1300 metatask->taskinfo.startstop.luns_complete++;
1302 metatask->taskinfo.startstop.luns_failed++;
/* The metatask is done once every LUN has checked in. */
1305 if ((metatask->taskinfo.startstop.luns_complete +
1306 metatask->taskinfo.startstop.luns_failed) >=
1307 metatask->taskinfo.startstop.total_luns)
1310 mtx_unlock(&softc->lock);
1313 printf("%s: LUN %d %s request failed\n", __func__,
1314 lun_io->lun->lun_id, (is_start == 1) ? "start" :
1316 ctl_io_error_print(io, &lun_io->lun->inq_data);
1319 if (metatask->taskinfo.startstop.luns_failed > 0)
1320 metatask->status = CFI_MT_ERROR;
1322 metatask->status = CFI_MT_SUCCESS;
1323 cfi_metatask_done(softc, metatask);
1325 mtx_lock(&softc->lock);
1326 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1327 mtx_unlock(&softc->lock);
1332 case CFI_TASK_BBRREAD: {
1334 * Translate the SCSI error into an enumeration.
1336 cfi_metatask_bbr_errorparse(metatask, io);
1338 mtx_lock(&softc->lock);
1339 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1340 mtx_unlock(&softc->lock);
1344 cfi_metatask_done(softc, metatask);
1349 * This shouldn't happen.
1351 mtx_lock(&softc->lock);
1352 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1353 mtx_unlock(&softc->lock);
/*
 * Completion handler for an error-recovery I/O (e.g. the LUN reset
 * issued from cfi_done()).  On recovery failure, reports both errors;
 * otherwise unlinks the recovery I/O and requeues the original command.
 * NOTE(review): interior lines (braces, frees, returns) missing from
 * this extraction; surviving lines kept verbatim.
 */
1361 cfi_err_recovery_done(union ctl_io *io)
1363 struct cfi_lun_io *lun_io, *orig_lun_io;
1364 struct cfi_lun *lun;
1365 union ctl_io *orig_io;
1367 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1368 orig_lun_io = lun_io->orig_lun_io;
1369 orig_io = orig_lun_io->ctl_io;
1372 if (io->io_hdr.status != CTL_SUCCESS) {
1373 printf("%s: error recovery action failed. Original "
1374 "error:\n", __func__);
1376 ctl_io_error_print(orig_lun_io->ctl_io, &lun->inq_data);
1378 printf("%s: error from error recovery action:\n", __func__);
1380 ctl_io_error_print(io, &lun->inq_data);
1382 printf("%s: trying original command again...\n", __func__);
/* Drop the recovery I/O from the LUN's pending list. */
1385 mtx_lock(&lun->softc->lock);
1386 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1387 mtx_unlock(&lun->softc->lock);
/* Recovery itself costs a retry; then resubmit the original I/O. */
1390 orig_io->io_hdr.retries--;
1391 orig_io->io_hdr.status = CTL_STATUS_NONE;
1393 if (ctl_queue(orig_io) != CTL_RETVAL_COMPLETE) {
1394 printf("%s: error returned from ctl_queue()!\n", __func__);
1395 STAILQ_REMOVE(&lun->io_list, orig_lun_io,
1397 ctl_free_io(orig_io);
/*
 * Generic done_function for metatask-originated LUN I/O: discards I/Os
 * with no metatask attached, otherwise forwards to
 * cfi_metatask_io_done().
 * NOTE(review): some interior lines (braces, frees, returns) missing
 * from this extraction; surviving lines kept verbatim.
 */
1402 cfi_lun_io_done(union ctl_io *io)
1404 struct cfi_lun *lun;
1405 struct cfi_lun_io *lun_io;
1407 lun_io = (struct cfi_lun_io *)
1408 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1411 if (lun_io->metatask == NULL) {
1412 printf("%s: I/O has no metatask pointer, discarding\n",
1414 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1418 cfi_metatask_io_done(io);
/*
 * Execute a metatask: either a startup/shutdown sweep over every ready
 * direct-access LUN (START STOP UNIT to each), or a BBR read against a
 * single LUN.  The metatask is first linked onto the softc metatask list;
 * if the port is not online the task fails immediately with
 * CFI_MT_PORT_OFFLINE.
 *
 * NOTE(review): fragmentary excerpt -- the return type, many braces,
 * 'continue'/'break'/'return' statements, and some assignments are not
 * visible in this chunk.  Comments below describe only what the visible
 * lines establish.
 */
1422 cfi_action(struct cfi_metatask *metatask)
1424 struct cfi_softc *softc;
1426 softc = &fetd_internal_softc;
1428 mtx_lock(&softc->lock);
1430 STAILQ_INSERT_TAIL(&softc->metatask_list, metatask, links);
/* Port offline: complete the metatask right away with an error status. */
1432 if ((softc->flags & CFI_ONLINE) == 0) {
1433 mtx_unlock(&softc->lock);
1434 metatask->status = CFI_MT_PORT_OFFLINE;
1435 cfi_metatask_done(softc, metatask);
1438 mtx_unlock(&softc->lock);
1440 switch (metatask->tasktype) {
1441 case CFI_TASK_STARTUP:
1442 case CFI_TASK_SHUTDOWN: {
1444 int da_luns, ios_allocated, do_start;
1445 struct cfi_lun *lun;
1446 STAILQ_HEAD(, ctl_io_hdr) tmp_io_list;
/*
 * First pass: pre-allocate one ctl_io per ready direct-access LUN onto a
 * temporary list, so we know up front how many we can actually service.
 */
1450 STAILQ_INIT(&tmp_io_list);
1452 if (metatask->tasktype == CFI_TASK_STARTUP)
1457 mtx_lock(&softc->lock);
1458 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1459 if (lun->state != CFI_LUN_READY)
1462 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1465 io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1468 STAILQ_INSERT_TAIL(&tmp_io_list, &io->io_hdr,
/* Allocation shortfall: only service as many LUNs as we got I/Os for. */
1473 if (ios_allocated < da_luns) {
1474 printf("%s: error allocating ctl_io for %s\n",
1475 __func__, (do_start == 1) ? "startup" :
1477 da_luns = ios_allocated;
1480 metatask->taskinfo.startstop.total_luns = da_luns;
/*
 * Second pass: take an I/O off the temporary list for each qualifying
 * LUN and issue an ordered START STOP UNIT through ctl_queue().
 */
1482 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1483 struct cfi_lun_io *lun_io;
1485 if (lun->state != CFI_LUN_READY)
1488 if (SID_TYPE(&lun->inq_data) != T_DIRECT)
1491 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1495 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr, ctl_io_hdr,
1498 ctl_scsi_start_stop(io,
1502 /*power_conditions*/
1505 /*ctl_tag_type*/ CTL_TAG_ORDERED,
1510 /*metatask*/ metatask,
1511 /*policy*/ CFI_ERR_HARD,
1513 /*orig_lun_io*/ NULL,
1514 /*done_function*/ cfi_lun_io_done);
1516 lun_io = (struct cfi_lun_io *) io->io_hdr.port_priv;
1518 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
/* Queue failure: unlink the I/O and shrink the expected LUN count. */
1520 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1521 printf("%s: error returned from ctl_queue()!\n",
1523 STAILQ_REMOVE(&lun->io_list, lun_io,
1526 metatask->taskinfo.startstop.total_luns--;
/* Any I/Os left over on the temp list are unexpected; drain and free. */
1530 if (STAILQ_FIRST(&tmp_io_list) != NULL) {
1531 printf("%s: error: tmp_io_list != NULL\n", __func__);
1532 for (io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list);
1534 io = (union ctl_io *)STAILQ_FIRST(&tmp_io_list)) {
1535 STAILQ_REMOVE(&tmp_io_list, &io->io_hdr,
1540 mtx_unlock(&softc->lock);
1544 case CFI_TASK_BBRREAD: {
1546 struct cfi_lun *lun;
1547 struct cfi_lun_io *lun_io;
1548 cfi_bbrread_status status;
1550 uint32_t num_blocks;
1552 status = CFI_BBR_SUCCESS;
1554 req_lun_num = metatask->taskinfo.bbrread.lun_num;
/* Locate the requested LUN; it must exist and be in the READY state. */
1556 mtx_lock(&softc->lock);
1557 STAILQ_FOREACH(lun, &softc->lun_list, links) {
1558 if (lun->lun_id != req_lun_num)
1560 if (lun->state != CFI_LUN_READY) {
1561 status = CFI_BBR_LUN_UNCONFIG;
1568 status = CFI_BBR_NO_LUN;
1570 if (status != CFI_BBR_SUCCESS) {
1571 metatask->status = CFI_MT_ERROR;
1572 metatask->taskinfo.bbrread.status = status;
1573 mtx_unlock(&softc->lock);
1574 cfi_metatask_done(softc, metatask);
1579 * Convert the number of bytes given into blocks and check
1580 * that the number of bytes is a multiple of the blocksize.
1581 * CTL will verify that the LBA is okay.
/* Power-of-two blocksize: mask/shift instead of modulo/divide. */
1583 if (lun->blocksize_powerof2 != 0) {
1584 if ((metatask->taskinfo.bbrread.len &
1585 (lun->blocksize - 1)) != 0) {
1586 metatask->status = CFI_MT_ERROR;
1587 metatask->taskinfo.bbrread.status =
1589 cfi_metatask_done(softc, metatask);
1593 num_blocks = metatask->taskinfo.bbrread.len >>
1594 lun->blocksize_powerof2;
1597 * XXX KDM this could result in floating point
1598 * division, which isn't supported in the kernel on
1601 if ((metatask->taskinfo.bbrread.len %
1602 lun->blocksize) != 0) {
1603 metatask->status = CFI_MT_ERROR;
1604 metatask->taskinfo.bbrread.status =
1606 cfi_metatask_done(softc, metatask);
1611 * XXX KDM this could result in floating point
1612 * division in some cases.
1614 num_blocks = metatask->taskinfo.bbrread.len /
/* Allocate the I/O for the read; fail the metatask on exhaustion. */
1619 io = ctl_alloc_io(softc->fe.ctl_pool_ref);
1621 metatask->status = CFI_MT_ERROR;
1622 metatask->taskinfo.bbrread.status = CFI_BBR_NO_MEM;
1623 mtx_unlock(&softc->lock);
1624 cfi_metatask_done(softc, metatask);
1629 * XXX KDM need to do a read capacity to get the blocksize
/* Build and queue the simple-tagged read for the requested LBA range. */
1632 ctl_scsi_read_write(io,
1634 /*data_len*/ metatask->taskinfo.bbrread.len,
1637 /*minimum_cdb_size*/ 0,
1638 /*lba*/ metatask->taskinfo.bbrread.lba,
1639 /*num_blocks*/ num_blocks,
1640 /*tag_type*/ CTL_TAG_SIMPLE,
1645 /*metatask*/ metatask,
1646 /*policy*/ CFI_ERR_SOFT,
1648 /*orig_lun_io*/ NULL,
1649 /*done_function*/ cfi_lun_io_done);
1651 lun_io = (struct cfi_lun_io *)io->io_hdr.port_priv;
1653 STAILQ_INSERT_TAIL(&lun->io_list, lun_io, links);
1655 if (ctl_queue(io) != CTL_RETVAL_COMPLETE) {
1656 printf("%s: error returned from ctl_queue()!\n",
/* Queue failure: unlink, mark the metatask failed, and complete it. */
1658 STAILQ_REMOVE(&lun->io_list, lun_io, cfi_lun_io, links);
1660 metatask->status = CFI_MT_ERROR;
1661 metatask->taskinfo.bbrread.status = CFI_BBR_ERROR;
1662 mtx_unlock(&softc->lock);
1663 cfi_metatask_done(softc, metatask);
1667 mtx_unlock(&softc->lock);
/* Unknown task type is a programming error, not a runtime condition. */
1671 panic("invalid metatask type %d", metatask->tasktype);
1672 break; /* NOTREACHED */
/*
 * Build and dispatch a CFI_TASK_SHUTDOWN metatask.  On allocation failure
 * the caller's callback is invoked immediately with CFI_MT_ERROR and zero
 * LUN counts.  The metatask owns 'element' and records it for later
 * release by cfi_free_metatask().
 *
 * NOTE(review): fragmentary excerpt -- return type, braces, and a likely
 * 'return' after the error callback are not visible here.
 * NOTE(review): the call below passes (softc, metatask), but the
 * cfi_action definition visible earlier in this file takes only
 * (metatask) -- verify the argument list against the full source.
 */
1678 cfi_shutdown_shelf(cfi_cb_t callback, void *callback_arg)
1680 struct ctl_mem_element *element;
1681 struct cfi_softc *softc;
1682 struct cfi_metatask *metatask;
1684 softc = &fetd_internal_softc;
/* Non-blocking allocation; report failure through the callback. */
1686 element = ctl_alloc_mem_element(&softc->metatask_pool, /*can_wait*/ 0);
1687 if (element == NULL) {
1688 callback(callback_arg,
1689 /*status*/ CFI_MT_ERROR,
1691 /*sluns_complete*/ 0,
1692 /*sluns_failed*/ 0);
/* Initialize the metatask in place and hand it to cfi_action(). */
1696 metatask = (struct cfi_metatask *)element->bytes;
1698 memset(metatask, 0, sizeof(*metatask));
1699 metatask->tasktype = CFI_TASK_SHUTDOWN;
1700 metatask->status = CFI_MT_NONE;
1701 metatask->taskinfo.startstop.callback = callback;
1702 metatask->taskinfo.startstop.callback_arg = callback_arg;
1703 metatask->element = element;
1705 cfi_action(softc, metatask);
1708 * - send a report luns to lun 0, get LUN list.
1709 * - send an inquiry to each lun
1710 * - send a stop/offline to each direct access LUN
1711 * - if we get a reservation conflict, reset the LUN and then
1712 * retry sending the stop/offline
1713 * - return status back to the caller
/*
 * Build and dispatch a CFI_TASK_STARTUP metatask.  Mirrors
 * cfi_shutdown_shelf() except for the task type: on allocation failure
 * the caller's callback is invoked immediately with CFI_MT_ERROR and zero
 * LUN counts; otherwise the metatask is initialized and passed to
 * cfi_action().
 *
 * NOTE(review): fragmentary excerpt -- return type, braces, and a likely
 * 'return' after the error callback are not visible here.
 * NOTE(review): the call below passes (softc, metatask), but the
 * cfi_action definition visible earlier in this file takes only
 * (metatask) -- verify the argument list against the full source.
 */
1718 cfi_start_shelf(cfi_cb_t callback, void *callback_arg)
1720 struct ctl_mem_element *element;
1721 struct cfi_softc *softc;
1722 struct cfi_metatask *metatask;
1724 softc = &fetd_internal_softc;
/* Non-blocking allocation; report failure through the callback. */
1726 element = ctl_alloc_mem_element(&softc->metatask_pool, /*can_wait*/ 0);
1727 if (element == NULL) {
1728 callback(callback_arg,
1729 /*status*/ CFI_MT_ERROR,
1731 /*sluns_complete*/ 0,
1732 /*sluns_failed*/ 0);
/* Initialize the metatask in place and hand it to cfi_action(). */
1736 metatask = (struct cfi_metatask *)element->bytes;
1738 memset(metatask, 0, sizeof(*metatask));
1739 metatask->tasktype = CFI_TASK_STARTUP;
1740 metatask->status = CFI_MT_NONE;
1741 metatask->taskinfo.startstop.callback = callback;
1742 metatask->taskinfo.startstop.callback_arg = callback_arg;
1743 metatask->element = element;
1745 cfi_action(softc, metatask);
1748 * - send a report luns to lun 0, get LUN list.
1749 * - send an inquiry to each lun
1750 * - send a start/online to each direct access LUN
1751 * - if we get a reservation conflict, reset the LUN and then
1752 * retry sending the start/online
1753 * - return status back to the caller
/*
 * Allocate and zero a cfi_metatask from the softc's metatask pool.
 * 'can_wait' is passed through to ctl_alloc_mem_element() to select
 * blocking vs. non-blocking allocation.  The backing pool element is
 * stored in metatask->element so cfi_free_metatask() can release it.
 *
 * NOTE(review): fragmentary excerpt -- the 'return (NULL)' on allocation
 * failure and the final 'return (metatask)' are not visible here; the
 * declared return type implies both.  Confirm against the full source.
 */
1759 struct cfi_metatask *
1760 cfi_alloc_metatask(int can_wait)
1762 struct ctl_mem_element *element;
1763 struct cfi_metatask *metatask;
1764 struct cfi_softc *softc;
1766 softc = &fetd_internal_softc;
1768 element = ctl_alloc_mem_element(&softc->metatask_pool, can_wait);
1769 if (element == NULL)
/* Zero the task and remember its pool element for later freeing. */
1772 metatask = (struct cfi_metatask *)element->bytes;
1773 memset(metatask, 0, sizeof(*metatask));
1774 metatask->status = CFI_MT_NONE;
1775 metatask->element = element;
1781 cfi_free_metatask(struct cfi_metatask *metatask)
1783 ctl_free_mem_element(metatask->element);