/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/endian.h>
#include <sys/sysctl.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
 */
#define CTL_ALIGN_8B(target, source, type)				\
	if (((uint32_t)source & 0x7) != 0)				\
		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
	else								\
		target = (type)source;

#define CTL_SIZE_8B(target, size)					\
	if ((size & 0x7) != 0)						\
		target = size + (0x8 - (size & 0x7));			\
	else								\
		target = size;

#define CTL_ALIGN_8B_MARGIN	16

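/*
 * Illustrative sketch (not part of the original source): rounding a
 * 13-byte length up to the next 8-byte multiple with the macro above.
 *
 *	uint32_t raw_len = 13, padded_len;
 *	CTL_SIZE_8B(padded_len, raw_len);	// padded_len == 16
 *
 * CTL_ALIGN_8B does the same for an address value, casting the result
 * to the requested pointer type.  "raw_len" and "padded_len" are
 * hypothetical names used only for this example.
 */
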
/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
const static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0},  /* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

const static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0},  /* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

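/*
 * A note on the "default" vs. "changeable" template pairs in this file:
 * per the SPC mode sense conventions, the changeable copy of a mode page
 * is a bit mask rather than a value set.  A one bit tells the initiator
 * that MODE SELECT may alter that bit; a zero bit marks it fixed.  That
 * is why ctl_time_io_secs is 0xff/0xff (fully writable) above while
 * page_version is 0 (read-only).
 */
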
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/0,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_DEXCPT,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/0,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static uint8_t ctl_pause_rtr;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");

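/*
 * Usage example (illustrative): worker_threads is CTLFLAG_RDTUN, so it
 * can only be set as a loader tunable, while debug is CTLFLAG_RWTUN and
 * can also be changed at runtime:
 *
 *	kern.cam.ctl.worker_threads=4	(in /boot/loader.conf)
 *	sysctl kern.cam.ctl.debug=1	(from a running system)
 */
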
/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_lun_enable(void *arg, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
					      struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);

static void ctl_failover_change_pages(struct ctl_softc *softc,
				      struct ctl_scsiio *ctsio, int master);
static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
				   bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
				union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
static void ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx,
			 ctl_ua_type ua_type);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			 ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
		ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
		ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};

MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ioctl_frontend =
{
	.name = "ioctl",
};

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       sizeof(ctsio->sense_data));
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;

	/*
	 * Attempt to catch the situation where an I/O has
	 * been freed, and we're using it again.
	 */
	if (ctsio->io_hdr.io_type == 0xff) {
		union ctl_io *tmp_io;
		tmp_io = (union ctl_io *)ctsio;
		printf("%s: %p use after free!\n", __func__,
		       tmp_io);
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
		       __func__,
		       tmp_io->io_hdr.io_type,
		       tmp_io->io_hdr.msg_type,
		       tmp_io->scsiio.cdb[0],
		       tmp_io->io_hdr.nexus.initid.id,
		       tmp_io->io_hdr.nexus.targ_port,
		       tmp_io->io_hdr.nexus.targ_target.id,
		       tmp_io->io_hdr.nexus.targ_lun,
		       (tmp_io->io_hdr.io_type ==
		       CTL_IO_TASK) ?
		       tmp_io->taskio.tag_num :
		       tmp_io->scsiio.tag_num,
		       tmp_io->io_hdr.flags,
		       tmp_io->io_hdr.status);
	}

	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	softc = control_softc;
	io = NULL;

#if 0
	printf("CTL: Isc Msg event %d\n", event);
#endif
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg msg_info;

		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
					     sizeof(msg_info), /*wait*/ 0);
#if 0
		printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
		if (isc_status != 0) {
			printf("Error receiving message, status = %d\n",
			       isc_status);
			return;
		}

		switch (msg_info.hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
#if 0
			printf("Serialize\n");
#endif
			io = ctl_alloc_io_nowait(softc->othersc_pool);
			if (io == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
				msg_info.hdr.status = CTL_SCSI_ERROR;
				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
				msg_info.scsi.sense_len = 0;
				if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
				}
				goto bailout;
			}
			// populate ctsio from msg_info
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
			printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode == CTL_HA_MODE_SER_ONLY)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
			printf("targ %d, port %d, iid %d, lun %d\n",
			       io->io_hdr.nexus.targ_target.id,
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid.id,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg_info.scsi.tag_num;
			io->scsiio.tag_type = msg_info.scsi.tag_type;
			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

			if (msg_info.dt.sg_sequence == 0) {
				/*
				 * XXX KDM we use the preallocated S/G list
				 * here, but we'll need to change this to
				 * dynamic allocation if we need larger S/G
				 * lists.
				 */
				if (msg_info.dt.kern_sg_entries >
				    sizeof(io->io_hdr.remote_sglist) /
				    sizeof(io->io_hdr.remote_sglist[0])) {
					printf("%s: number of S/G entries "
					       "needed %u > allocated num %zd\n",
					       __func__,
					       msg_info.dt.kern_sg_entries,
					       sizeof(io->io_hdr.remote_sglist)/
					       sizeof(io->io_hdr.remote_sglist[0]));

					/*
					 * XXX KDM send a message back to
					 * the other side to shut down the
					 * DMA.  The error will come back
					 * through via the normal channel.
					 */
					break;
				}
				sgl = io->io_hdr.remote_sglist;
				memset(sgl, 0,
				       sizeof(io->io_hdr.remote_sglist));

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg_info.dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg_info.dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg_info.dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg_info.dt.kern_rel_offset;
				/*
				 * Clear out per-DMA flags.
				 */
				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
				/*
				 * Add per-DMA flags that are set for this
				 * particular DMA request.
				 */
				io->io_hdr.flags |= msg_info.dt.flags &
						    CTL_FLAG_RDMA_MASK;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			for (i = msg_info.dt.sent_sg_entries, j = 0;
			     i < (msg_info.dt.sent_sg_entries +
			     msg_info.dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg_info.dt.sg_list[j].addr;
				sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
				       __func__,
				       msg_info.dt.sg_list[j].addr,
				       msg_info.dt.sg_list[j].len,
				       sgl[i].addr, sgl[i].len, j, i);
#endif
			}
#if 0
			memcpy(&sgl[msg_info.dt.sent_sg_entries],
			       msg_info.dt.sg_list,
			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg_info.dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg_info.hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg_info.hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.status = msg_info.hdr.status;
			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
			io->scsiio.sense_len = msg_info.scsi.sense_len;
			io->scsiio.sense_residual = msg_info.scsi.sense_residual;
			io->io_hdr.port_status = msg_info.scsi.fetd_status;
			io->scsiio.residual = msg_info.scsi.residual;
			memcpy(&io->scsiio.sense_data, &msg_info.scsi.sense_data,
			       sizeof(io->scsiio.sense_data));
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Major Bummer\n", __func__);
				return;
			}
#if 0
			printf("pOrig %x\n", (int)ctsio);
#endif
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc,
							    &msg_info);
			else
				ctl_isc_handler_finish_ser_only(softc,
								&msg_info);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(&msg_info, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg_info.hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io_nowait(
			    softc->othersc_pool);
			if (taskio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* should I just call the proper reset func
				   here??? */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg_info.hdr.nexus;
			taskio->task_action = msg_info.task.task_action;
			taskio->tag_num = msg_info.task.tag_num;
			taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#if 0
			cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io_nowait(
			    softc->othersc_pool);
			if (presio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->pr_msg = msg_info.pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_SYNC_FE:
			rcv_sync_msg = 1;
			break;
		default:
			printf("How did I get here?\n");
		}
	} else if (event == CTL_HA_EVT_MSG_SENT) {
		if (param != CTL_HA_STATUS_SUCCESS) {
			printf("Bad status from ctl_ha_msg_send status %d\n",
			       param);
		}
		return;
	} else if (event == CTL_HA_EVT_DISCONNECT) {
		printf("CTL: Got a disconnect from Isc\n");
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}

bailout:
	return;
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
	struct scsi_sense_data *sense;

	sense = &dest->scsiio.sense_data;
	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	ctl_ua_type *pu;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = 0; i < CTL_MAX_PORTS; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] |= ua;
		}
	}
}

void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	ctl_ua_type *pu;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = 0; i < CTL_MAX_PORTS; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] &= ~ua;
		}
	}
}

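/*
 * Illustrative sketch: callers must hold the per-LUN lock around the
 * unit attention helpers above, e.g. to establish a UA for every
 * initiator except the one that triggered the change:
 *
 *	mtx_lock(&lun->lun_lock);
 *	ctl_est_ua_all(lun, initidx, CTL_UA_ASYM_ACC_CHANGE);
 *	mtx_unlock(&lun->lun_lock);
 *
 * "initidx" is a hypothetical initiator index; passing -1 (as
 * ctl_ha_state_sysctl() below does) matches no initiator, so the UA is
 * established for all of them.
 */
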
static int
ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg1;
	struct ctl_lun *lun;
	int error, value;

	if (softc->flags & CTL_FLAG_ACTIVE_SHELF)
		value = 0;
	else
		value = 1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	mtx_lock(&softc->ctl_lock);
	if (value == 0)
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
	else
		softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
	return (0);
}

static int
ctl_init(void)
{
	struct ctl_softc *softc;
	void *other_pool;
	struct ctl_port *port;
	int i, error, retval;

	retval = 0;
	ctl_pause_rtr = 0;
	rcv_sync_msg = 0;

	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
			       M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
			      "cam/ctl");

	softc->dev->si_drv1 = softc;

	/*
	 * By default, return a "bad LUN" peripheral qualifier for unknown
	 * LUNs.  The user can override this default using the tunable or
	 * sysctl.  See the comment in ctl_inquiry_std() for more details.
	 */
	softc->inquiry_pq_no_lun = 1;
	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
			  &softc->inquiry_pq_no_lun);
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
		CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		       "inquiry_pq_no_lun", CTLFLAG_RW,
		       &softc->inquiry_pq_no_lun, 0,
		       "Report no lun possible for invalid LUNs");

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the device.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
	    "HA head ID (0 - no HA)");
	if (softc->ha_id == 0) {
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
		softc->is_single = 1;
		softc->port_offset = 0;
	} else
		softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS;
	softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT;

	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->port_list);
	STAILQ_INIT(&softc->be_list);
	ctl_tpc_init(softc);

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
	                    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		return (ENOMEM);
	}
	softc->othersc_pool = other_pool;

	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}
	error = kproc_kthread_add(ctl_thresh_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
	if (error != 0) {
		printf("error creating CTL threshold thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}

	/*
	 * Initialize the ioctl front end.
	 */
	ctl_frontend_register(&ioctl_frontend);
	port = &softc->ioctl_info.port;
	port->frontend = &ioctl_frontend;
	sprintf(softc->ioctl_info.port_name, "ioctl");
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = softc->ioctl_info.port_name;
	port->port_online = ctl_ioctl_online;
	port->port_offline = ctl_ioctl_offline;
	port->onoff_arg = &softc->ioctl_info;
	port->lun_enable = ctl_ioctl_lun_enable;
	port->lun_disable = ctl_ioctl_lun_disable;
	port->targ_lun_arg = &softc->ioctl_info;
	port->fe_datamove = ctl_ioctl_datamove;
	port->fe_done = ctl_ioctl_done;
	port->max_targets = 15;
	port->max_target_id = 15;

	if (ctl_port_register(&softc->ioctl_info.port) != 0) {
		printf("ctl: ioctl front end registration failed, will "
		       "continue anyway\n");
	}

	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN,
	    softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head");

#ifdef CTL_IO_DELAY
	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
		       sizeof(struct callout), CTL_TIMER_BYTES);
		return (EINVAL);
	}
#endif /* CTL_IO_DELAY */

	return (0);
}

void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;

	softc = (struct ctl_softc *)control_softc;

	if (ctl_port_deregister(&softc->ioctl_info.port) != 0)
		printf("ctl: ioctl front end deregistration failed\n");

	mtx_lock(&softc->ctl_lock);

	/*
	 * Free up each LUN.
	 */
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
		next_lun = STAILQ_NEXT(lun, links);
		ctl_free_lun(lun);
	}

	mtx_unlock(&softc->ctl_lock);

	ctl_frontend_deregister(&ioctl_frontend);

#if 0
	ctl_shutdown_thread(softc->work_thread);
	mtx_destroy(&softc->queue_lock);
#endif

	ctl_tpc_shutdown(softc);
	uma_zdestroy(softc->io_zone);
	mtx_destroy(&softc->ctl_lock);

	destroy_dev(softc->dev);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(control_softc, M_DEVBUF);
	control_softc = NULL;
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

int
ctl_port_enable(ctl_port_type port_type)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;

	if (softc->is_single == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

#if 0
		printf("%s: HA mode, synchronizing frontend enable\n",
		       __func__);
#endif
		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
			printf("Sync msg send error retval %d\n", isc_retval);
		}
		if (!rcv_sync_msg) {
			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 1);
		}
#if 0
		printf("CTL:Frontend Enable\n");
	} else {
		printf("%s: single mode, skipping frontend synchronization\n",
		       __func__);
#endif
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
		{
#if 0
			printf("port %d\n", port->targ_port);
#endif
			ctl_port_online(port);
		}
	}

	return (0);
}

int
ctl_port_disable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_port *port;

	softc = control_softc;

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
			ctl_port_offline(port);
	}

	return (0);
}

/*
 * Returns 0 for success, 1 for failure.
 * Currently the only failure mode is if there aren't enough entries
 * allocated.  So, in case of a failure, look at num_entries_dropped,
 * reallocate and try again.
 */
int
ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
	      int *num_entries_filled, int *num_entries_dropped,
	      ctl_port_type port_type, int no_virtual)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	int entries_dropped, entries_filled;
	int retval;
	int i;

	softc = control_softc;

	retval = 0;
	entries_filled = 0;
	entries_dropped = 0;

	i = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		struct ctl_port_entry *entry;

		if ((port->port_type & port_type) == 0)
			continue;

		if ((no_virtual != 0)
		 && (port->virtual_port != 0))
			continue;

		if (entries_filled >= num_entries_alloced) {
			entries_dropped++;
			continue;
		}
		entry = &entries[i];

		entry->port_type = port->port_type;
		strlcpy(entry->port_name, port->port_name,
			sizeof(entry->port_name));
		entry->physical_port = port->physical_port;
		entry->virtual_port = port->virtual_port;
		entry->wwnn = port->wwnn;
		entry->wwpn = port->wwpn;

		i++;
		entries_filled++;
	}

	mtx_unlock(&softc->ctl_lock);

	if (entries_dropped > 0)
		retval = 1;

	*num_entries_dropped = entries_dropped;
	*num_entries_filled = entries_filled;

	return (retval);
}

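/*
 * Illustrative caller sketch for the retry contract described above: if
 * ctl_port_list() returns 1, grow the array by num_entries_dropped and
 * try again.  "entries", "alloced", "filled" and "dropped" are
 * hypothetical locals.
 *
 *	while (ctl_port_list(entries, alloced, &filled, &dropped,
 *	    CTL_PORT_ALL, 0) != 0) {
 *		alloced += dropped;
 *		entries = realloc(entries, alloced * sizeof(*entries),
 *		    M_TEMP, M_WAITOK);
 *	}
 */
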
static void
ctl_ioctl_online(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
}

static void
ctl_ioctl_offline(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, -1 for failure.
 */
int
ctl_remove_initiator(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid > CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u > maximum %u!\n",
		       __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);
	port->wwpn_iid[iid].in_use--;
	port->wwpn_iid[iid].last_use = time_uptime;
	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns iid for success, < 0 for failure.
 */
int
ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
	struct ctl_softc *softc = control_softc;
	time_t best_time;
	int i, best;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
		       __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		free(name, M_CTL);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);

	if (iid < 0 && (wwpn != 0 || name != NULL)) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
				iid = i;
				break;
			}
			if (name != NULL && port->wwpn_iid[i].name != NULL &&
			    strcmp(name, port->wwpn_iid[i].name) == 0) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0 &&
			    port->wwpn_iid[i].wwpn == 0 &&
			    port->wwpn_iid[i].name == NULL) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		best = -1;
		best_time = INT32_MAX;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0) {
				if (port->wwpn_iid[i].last_use < best_time) {
					best = i;
					best_time = port->wwpn_iid[i].last_use;
				}
			}
		}
		iid = best;
	}

	if (iid < 0) {
		mtx_unlock(&softc->ctl_lock);
		free(name, M_CTL);
		return (-2);
	}

	if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
		/*
		 * This is not an error yet.
		 */
		if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
#if 0
			printf("%s: port %d iid %u WWPN %#jx arrived"
			    " again\n", __func__, port->targ_port,
			    iid, (uintmax_t)wwpn);
#endif
			goto take;
		}
		if (name != NULL && port->wwpn_iid[iid].name != NULL &&
		    strcmp(name, port->wwpn_iid[iid].name) == 0) {
#if 0
			printf("%s: port %d iid %u name '%s' arrived"
			    " again\n", __func__, port->targ_port,
			    iid, name);
#endif
			goto take;
		}

		/*
		 * This is an error, but what do we do about it?  The
		 * driver is telling us we have a new WWPN for this
		 * initiator ID, so we pretty much need to use it.
		 */
		printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
		    " but WWPN %#jx '%s' is still at that address\n",
		    __func__, port->targ_port, iid, wwpn, name,
		    (uintmax_t)port->wwpn_iid[iid].wwpn,
		    port->wwpn_iid[iid].name);

		/*
		 * XXX KDM clear have_ca and ua_pending on each LUN for
		 * this initiator.
		 */
	}
take:
	free(port->wwpn_iid[iid].name, M_CTL);
	port->wwpn_iid[iid].name = name;
	port->wwpn_iid[iid].wwpn = wwpn;
	port->wwpn_iid[iid].in_use++;
	mtx_unlock(&softc->ctl_lock);

	return (iid);
}

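/*
 * Illustrative note: a frontend typically calls ctl_add_initiator() with
 * iid == -1 and a WWPN or name, letting the lookups above find an
 * existing slot, a free slot, or the least recently used stale slot:
 *
 *	iid = ctl_add_initiator(port, -1, wwpn, NULL);
 *	if (iid < 0)
 *		// no slot could be found or reused
 *
 * "wwpn" is a hypothetical variable used only for this example.
 */
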
int
ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
	int len;

	switch (port->port_type) {
	case CTL_PORT_FC:
	{
		struct scsi_transportid_fcp *id =
		    (struct scsi_transportid_fcp *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_FC;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
		return (sizeof(*id));
	}
	case CTL_PORT_ISCSI:
	{
		struct scsi_transportid_iscsi_port *id =
		    (struct scsi_transportid_iscsi_port *)buf;
		if (port->wwpn_iid[iid].name == NULL)
			return (0);
		memset(id, 0, 256);
		id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
		    SCSI_PROTO_ISCSI;
		len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
		len = roundup2(min(len, 252), 4);
		scsi_ulto2b(len, id->additional_length);
		return (sizeof(*id) + len);
	}
	case CTL_PORT_SAS:
	{
		struct scsi_transportid_sas *id =
		    (struct scsi_transportid_sas *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SAS;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
		return (sizeof(*id));
	}
	default:
	{
		struct scsi_transportid_spi *id =
		    (struct scsi_transportid_spi *)buf;
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SPI;
		scsi_ulto2b(iid, id->scsi_addr);
		scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
		return (sizeof(*id));
	}
	}
}

static int
ctl_ioctl_lun_enable(void *arg, int lun_id)
{
	return (0);
}

static int
ctl_ioctl_lun_disable(void *arg, int lun_id)
{
	return (0);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist,
		    ext_sglen) != 0) {
			ctl_set_internal_failure(ctsio,
						 /*sks_valid*/ 0,
						 /*retry_count*/ 0);
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}

	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
				  kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));

	/* XXX KDM set residual?? */
bailout:

	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

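/*
 * Illustrative walk-through of the watermark loop above (hypothetical
 * sizes): with an external S/G list of {4K, 4K} and a single 8K kernel
 * buffer, the first pass copies 4K and exhausts ext entry 0, so i
 * advances and ext_watermark resets; the second pass copies the
 * remaining 4K and exhausts the kernel entry, so j advances.  Each pass
 * moves MIN(remaining ext, remaining kern) bytes.
 */
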
/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	int retval = 0;
	uint32_t targ_lun;

	softc = control_softc;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	lun = softc->ctl_luns[targ_lun];
	if (lun == NULL) {
		/*
		 * Why isn't LUN defined? The other side wouldn't
		 * send a cmd if the LUN is undefined.
		 */
		printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);

		/* "Logical unit not supported" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x25,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		return (1);
	}

	mtx_lock(&lun->lun_lock);
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
		 ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
		} else {

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
#if 0
			printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
#endif
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
			}
		}
		break;
	case CTL_ACTION_OVERLAP:
		/* OVERLAPPED COMMANDS ATTEMPTED */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4E,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_OVERLAP_TAG:
		/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4D,
				   /*ascq*/ ctsio->tag_num & 0xff,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap Tag\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_ERROR:
	default:
		/* "Internal target failure" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				   /*asc*/ 0x44,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer HW Error\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	}
	mtx_unlock(&lun->lun_lock);
	return (retval);
}

static int
ctl_ioctl_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	retval = 0;

	bzero(&params, sizeof(params));

	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

static void
ctl_ioctl_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_startstop_info *sd_info;

	sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;

	sd_info->hs_info.status = metatask->status;
	sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
	sd_info->hs_info.luns_complete =
		metatask->taskinfo.startstop.luns_complete;
	sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;

	cv_broadcast(&sd_info->sem);
}

static void
ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;

	fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;

	mtx_lock(fe_bbr_info->lock);
	fe_bbr_info->bbr_info->status = metatask->status;
	fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
	fe_bbr_info->wakeup_done = 1;
	mtx_unlock(fe_bbr_info->lock);

	cv_broadcast(&fe_bbr_info->sem);
}

/*
 * Returns 0 for success, errno for failure.
 */
static int
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
		   struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	union ctl_io *io;
	int retval;

	retval = 0;

	mtx_lock(&lun->lun_lock);
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	     ooa_links)) {
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
		 */
		if (*cur_fill_num >= ooa_hdr->alloc_num)
			continue;

		entry = &kern_entries[*cur_fill_num];

		entry->tag_num = io->scsiio.tag_num;
		entry->lun_num = lun->lun;
#ifdef CTL_TIME_IO
		entry->start_bt = io->io_hdr.start_bt;
#endif
		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
		entry->cdb_len = io->scsiio.cdb_len;
		if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;

		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;

		if (io->io_hdr.flags & CTL_FLAG_ABORT)
			entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;

		if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
			entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;

		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
	}
	mtx_unlock(&lun->lun_lock);

	return (retval);
}

static void *
ctl_copyin_alloc(void *user_addr, int len, char *error_str,
		 size_t error_str_len)
{
	void *kptr;

	kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);

	if (copyin(user_addr, kptr, len) != 0) {
		snprintf(error_str, error_str_len, "Error copying %d bytes "
			 "from user address %p to kernel address %p", len,
			 user_addr, kptr);
		free(kptr, M_CTL);
		return (NULL);
	}

	return (kptr);
}

static void
ctl_free_args(int num_args, struct ctl_be_arg *args)
{
	int i;

	if (args == NULL)
		return;

	for (i = 0; i < num_args; i++) {
		free(args[i].kname, M_CTL);
		free(args[i].kvalue, M_CTL);
	}

	free(args, M_CTL);
}

static struct ctl_be_arg *
ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
		char *error_str, size_t error_str_len)
{
	struct ctl_be_arg *args;
	int i;

	args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
				error_str, error_str_len);

	if (args == NULL)
		goto bailout;

	for (i = 0; i < num_args; i++) {
		args[i].kname = NULL;
		args[i].kvalue = NULL;
	}

	for (i = 0; i < num_args; i++) {
		uint8_t *tmpptr;

		args[i].kname = ctl_copyin_alloc(args[i].name,
			args[i].namelen, error_str, error_str_len);
		if (args[i].kname == NULL)
			goto bailout;
		if (args[i].kname[args[i].namelen - 1] != '\0') {
			snprintf(error_str, error_str_len, "Argument %d "
				 "name is not NUL-terminated", i);
			goto bailout;
		}

		if (args[i].flags & CTL_BEARG_RD) {
			tmpptr = ctl_copyin_alloc(args[i].value,
				args[i].vallen, error_str, error_str_len);
			if (tmpptr == NULL)
				goto bailout;
			if ((args[i].flags & CTL_BEARG_ASCII)
			 && (tmpptr[args[i].vallen - 1] != '\0')) {
				snprintf(error_str, error_str_len, "Argument "
				    "%d value is not NUL-terminated", i);
				goto bailout;
			}
			args[i].kvalue = tmpptr;
		} else {
			args[i].kvalue = malloc(args[i].vallen,
			    M_CTL, M_WAITOK | M_ZERO);
		}
	}

	return (args);

bailout:

	ctl_free_args(num_args, args);

	return (NULL);
}

2271 ctl_copyout_args(int num_args, struct ctl_be_arg *args)
2275 for (i = 0; i < num_args; i++) {
2276 if (args[i].flags & CTL_BEARG_WR)
2277 copyout(args[i].kvalue, args[i].value, args[i].vallen);
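/*
 * Hypothetical example of the round trip implemented by
 * ctl_copyin_args()/ctl_copyout_args(): userland marks an ASCII
 * argument readable by the kernel with CTL_BEARG_RD, and its value is
 * copied back out after the backend runs only if CTL_BEARG_WR is set.
 * The option name "file" is illustrative, not a documented option.
 *
 *	struct ctl_be_arg arg;
 *	arg.name = "file";
 *	arg.namelen = strlen(arg.name) + 1;	// must include the NUL
 *	arg.flags = CTL_BEARG_RD | CTL_BEARG_ASCII;
 *	arg.value = path;
 *	arg.vallen = strlen(path) + 1;		// NUL-terminated, as checked above
 */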
2282 * Escape characters that are illegal or not recommended in XML.
2285 ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
2287 char *end = str + size;
2292 for (; *str && str < end; str++) {
2295 retval = sbuf_printf(sb, "&amp;");
2298 retval = sbuf_printf(sb, "&gt;");
2301 retval = sbuf_printf(sb, "&lt;");
2304 retval = sbuf_putc(sb, *str);
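/*
 * Usage sketch: the CTL_LUN_LIST handler below feeds serial numbers
 * and device IDs through this escaper before embedding them in its
 * XML output, so a (hypothetical) value "a<b&c" is emitted as
 * "a&lt;b&amp;c":
 *
 *	sbuf_printf(sb, "\t<serial_number>");
 *	ctl_sbuf_printf_esc(sb, sn, sizeof(sn));
 *	sbuf_printf(sb, "</serial_number>\n");
 */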
2317 ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
2319 struct scsi_vpd_id_descriptor *desc;
2322 if (id == NULL || id->len < 4)
2324 desc = (struct scsi_vpd_id_descriptor *)id->data;
2325 switch (desc->id_type & SVPD_ID_TYPE_MASK) {
2326 case SVPD_ID_TYPE_T10:
2327 sbuf_printf(sb, "t10.");
2329 case SVPD_ID_TYPE_EUI64:
2330 sbuf_printf(sb, "eui.");
2332 case SVPD_ID_TYPE_NAA:
2333 sbuf_printf(sb, "naa.");
2335 case SVPD_ID_TYPE_SCSI_NAME:
2338 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
2339 case SVPD_ID_CODESET_BINARY:
2340 for (i = 0; i < desc->length; i++)
2341 sbuf_printf(sb, "%02x", desc->identifier[i]);
2343 case SVPD_ID_CODESET_ASCII:
2344 sbuf_printf(sb, "%.*s", (int)desc->length,
2345 (char *)desc->identifier);
2347 case SVPD_ID_CODESET_UTF8:
2348 sbuf_printf(sb, "%s", (char *)desc->identifier);
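/*
 * Worked examples (identifier values hypothetical): a binary-codeset
 * NAA descriptor renders as "naa.5000c50012345678", an EUI-64 as
 * "eui.0123456789abcdef", and an ASCII T10 descriptor as
 * "t10.FREEBSD mydisk".  The prefix comes from the id_type switch,
 * the body from the codeset switch above.
 */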
2354 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2357 struct ctl_softc *softc;
2360 softc = control_softc;
2370 * If we haven't been "enabled", don't allow any SCSI I/O
2373 if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
2378 io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref);
2381 * Need to save the pool reference so it doesn't get
2382 * spammed by the user's ctl_io.
2384 pool_tmp = io->io_hdr.pool;
2385 memcpy(io, (void *)addr, sizeof(*io));
2386 io->io_hdr.pool = pool_tmp;
2389 * No status yet, so make sure the status is set properly.
2391 io->io_hdr.status = CTL_STATUS_NONE;
2394 * The user sets the initiator ID, target and LUN IDs.
2396 io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port;
2397 io->io_hdr.flags |= CTL_FLAG_USER_REQ;
2398 if ((io->io_hdr.io_type == CTL_IO_SCSI)
2399 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
2400 io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++;
2402 retval = ctl_ioctl_submit_wait(io);
2409 memcpy((void *)addr, io, sizeof(*io));
2411 /* return this to our pool */
2416 case CTL_ENABLE_PORT:
2417 case CTL_DISABLE_PORT:
2418 case CTL_SET_PORT_WWNS: {
2419 struct ctl_port *port;
2420 struct ctl_port_entry *entry;
2422 entry = (struct ctl_port_entry *)addr;
2424 mtx_lock(&softc->ctl_lock);
2425 STAILQ_FOREACH(port, &softc->port_list, links) {
2431 if ((entry->port_type == CTL_PORT_NONE)
2432 && (entry->targ_port == port->targ_port)) {
2434 * If the user only wants to enable or
2435 * disable or set WWNs on a specific port,
2436 * do the operation and we're done.
2440 } else if (entry->port_type & port->port_type) {
2442 * Compare the user's type mask with the
2443 * particular frontend type to see if we
2450 * Make sure the user isn't trying to set
2451 * WWNs on multiple ports at the same time.
2453 if (cmd == CTL_SET_PORT_WWNS) {
2454 printf("%s: Can't set WWNs on "
2455 "multiple ports\n", __func__);
2462 * XXX KDM we have to drop the lock here,
2463 * because the online/offline operations
2464 * can potentially block. We need to
2465 * reference count the frontends so they
2468 mtx_unlock(&softc->ctl_lock);
2470 if (cmd == CTL_ENABLE_PORT) {
2471 ctl_port_online(port);
2472 } else if (cmd == CTL_DISABLE_PORT) {
2473 ctl_port_offline(port);
2476 mtx_lock(&softc->ctl_lock);
2478 if (cmd == CTL_SET_PORT_WWNS)
2479 ctl_port_set_wwns(port,
2480 (entry->flags & CTL_PORT_WWNN_VALID) ?
2482 (entry->flags & CTL_PORT_WWPN_VALID) ?
2483 1 : 0, entry->wwpn);
2488 mtx_unlock(&softc->ctl_lock);
2491 case CTL_GET_PORT_LIST: {
2492 struct ctl_port *port;
2493 struct ctl_port_list *list;
2496 list = (struct ctl_port_list *)addr;
2498 if (list->alloc_len != (list->alloc_num *
2499 sizeof(struct ctl_port_entry))) {
2500 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != "
2501 "alloc_num %u * sizeof(struct ctl_port_entry) "
2502 "%zu\n", __func__, list->alloc_len,
2503 list->alloc_num, sizeof(struct ctl_port_entry));
2509 list->dropped_num = 0;
2511 mtx_lock(&softc->ctl_lock);
2512 STAILQ_FOREACH(port, &softc->port_list, links) {
2513 struct ctl_port_entry entry, *list_entry;
2515 if (list->fill_num >= list->alloc_num) {
2516 list->dropped_num++;
2520 entry.port_type = port->port_type;
2521 strlcpy(entry.port_name, port->port_name,
2522 sizeof(entry.port_name));
2523 entry.targ_port = port->targ_port;
2524 entry.physical_port = port->physical_port;
2525 entry.virtual_port = port->virtual_port;
2526 entry.wwnn = port->wwnn;
2527 entry.wwpn = port->wwpn;
2528 if (port->status & CTL_PORT_STATUS_ONLINE)
2533 list_entry = &list->entries[i];
2535 retval = copyout(&entry, list_entry, sizeof(entry));
2537 printf("%s: CTL_GET_PORT_LIST: copyout "
2538 "returned %d\n", __func__, retval);
2543 list->fill_len += sizeof(entry);
2545 mtx_unlock(&softc->ctl_lock);
2548 * If this is non-zero, we had a copyout fault, so there's
2549 * probably no point in attempting to set the status inside
2555 if (list->dropped_num > 0)
2556 list->status = CTL_PORT_LIST_NEED_MORE_SPACE;
2558 list->status = CTL_PORT_LIST_OK;
2561 case CTL_DUMP_OOA: {
2562 struct ctl_lun *lun;
2567 mtx_lock(&softc->ctl_lock);
2568 printf("Dumping OOA queues:\n");
2569 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2570 mtx_lock(&lun->lun_lock);
2571 for (io = (union ctl_io *)TAILQ_FIRST(
2572 &lun->ooa_queue); io != NULL;
2573 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
2575 sbuf_new(&sb, printbuf, sizeof(printbuf),
2577 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ",
2581 CTL_FLAG_BLOCKED) ? " BLOCKED" : "",
2583 CTL_FLAG_DMA_INPROG) ? " DMA" : "",
2585 CTL_FLAG_ABORT) ? " ABORT" : "",
2587 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : "");
2588 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
2590 printf("%s\n", sbuf_data(&sb));
2592 mtx_unlock(&lun->lun_lock);
2594 printf("OOA queues dump done\n");
2595 mtx_unlock(&softc->ctl_lock);
2599 struct ctl_lun *lun;
2600 struct ctl_ooa *ooa_hdr;
2601 struct ctl_ooa_entry *entries;
2602 uint32_t cur_fill_num;
2604 ooa_hdr = (struct ctl_ooa *)addr;
2606 if ((ooa_hdr->alloc_len == 0)
2607 || (ooa_hdr->alloc_num == 0)) {
2608 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
2609 "must be non-zero\n", __func__,
2610 ooa_hdr->alloc_len, ooa_hdr->alloc_num);
2615 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
2616 sizeof(struct ctl_ooa_entry))) {
2617 printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
2618 "num %d * sizeof(struct ctl_ooa_entry) %zd\n",
2619 __func__, ooa_hdr->alloc_len,
2620 ooa_hdr->alloc_num, sizeof(struct ctl_ooa_entry));
2625 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
2626 if (entries == NULL) {
2627 printf("%s: could not allocate %d bytes for OOA "
2628 "dump\n", __func__, ooa_hdr->alloc_len);
2633 mtx_lock(&softc->ctl_lock);
2634 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
2635 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS)
2636 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
2637 mtx_unlock(&softc->ctl_lock);
2638 free(entries, M_CTL);
2639 printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
2640 __func__, (uintmax_t)ooa_hdr->lun_num);
2647 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
2648 STAILQ_FOREACH(lun, &softc->lun_list, links) {
2649 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,
2655 mtx_unlock(&softc->ctl_lock);
2656 free(entries, M_CTL);
2660 lun = softc->ctl_luns[ooa_hdr->lun_num];
2662 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr,
2665 mtx_unlock(&softc->ctl_lock);
2667 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
2668 ooa_hdr->fill_len = ooa_hdr->fill_num *
2669 sizeof(struct ctl_ooa_entry);
2670 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
2672 printf("%s: error copying out %d bytes for OOA dump\n",
2673 __func__, ooa_hdr->fill_len);
2676 getbintime(&ooa_hdr->cur_bt);
2678 if (cur_fill_num > ooa_hdr->alloc_num) {
2679 ooa_hdr->dropped_num = cur_fill_num - ooa_hdr->alloc_num;
2680 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
2682 ooa_hdr->dropped_num = 0;
2683 ooa_hdr->status = CTL_OOA_OK;
2686 free(entries, M_CTL);
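/*
 * Hypothetical userland pattern for CTL_GET_OOA: size the entry
 * buffer from alloc_num, then grow and retry when the handler reports
 * CTL_OOA_NEED_MORE_SPACE along with a dropped_num count.
 *
 *	ooa.alloc_num = 256;
 *	ooa.alloc_len = ooa.alloc_num * sizeof(struct ctl_ooa_entry);
 *	ooa.entries = malloc(ooa.alloc_len);
 *	if (ioctl(fd, CTL_GET_OOA, &ooa) == 0 &&
 *	    ooa.status == CTL_OOA_NEED_MORE_SPACE)
 *		(grow by ooa.dropped_num entries and retry)
 */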
2689 case CTL_CHECK_OOA: {
2691 struct ctl_lun *lun;
2692 struct ctl_ooa_info *ooa_info;
2695 ooa_info = (struct ctl_ooa_info *)addr;
2697 if (ooa_info->lun_id >= CTL_MAX_LUNS) {
2698 ooa_info->status = CTL_OOA_INVALID_LUN;
2701 mtx_lock(&softc->ctl_lock);
2702 lun = softc->ctl_luns[ooa_info->lun_id];
2704 mtx_unlock(&softc->ctl_lock);
2705 ooa_info->status = CTL_OOA_INVALID_LUN;
2708 mtx_lock(&lun->lun_lock);
2709 mtx_unlock(&softc->ctl_lock);
2710 ooa_info->num_entries = 0;
2711 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
2712 io != NULL; io = (union ctl_io *)TAILQ_NEXT(
2713 &io->io_hdr, ooa_links)) {
2714 ooa_info->num_entries++;
2716 mtx_unlock(&lun->lun_lock);
2718 ooa_info->status = CTL_OOA_SUCCESS;
2722 case CTL_HARD_START:
2723 case CTL_HARD_STOP: {
2724 struct ctl_fe_ioctl_startstop_info ss_info;
2725 struct cfi_metatask *metatask;
2728 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF);
2730 cv_init(&ss_info.sem, "hard start/stop cv");
2732 metatask = cfi_alloc_metatask(/*can_wait*/ 1);
2733 if (metatask == NULL) {
2735 mtx_destroy(&hs_mtx);
2739 if (cmd == CTL_HARD_START)
2740 metatask->tasktype = CFI_TASK_STARTUP;
2742 metatask->tasktype = CFI_TASK_SHUTDOWN;
2744 metatask->callback = ctl_ioctl_hard_startstop_callback;
2745 metatask->callback_arg = &ss_info;
2747 cfi_action(metatask);
2749 /* Wait for the callback */
2751 cv_wait_sig(&ss_info.sem, &hs_mtx);
2752 mtx_unlock(&hs_mtx);
2755 * All information has been copied from the metatask by the
2756 * time cv_broadcast() is called, so we free the metatask here.
2758 cfi_free_metatask(metatask);
2760 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info));
2762 mtx_destroy(&hs_mtx);
2766 struct ctl_bbrread_info *bbr_info;
2767 struct ctl_fe_ioctl_bbrread_info fe_bbr_info;
2769 struct cfi_metatask *metatask;
2771 bbr_info = (struct ctl_bbrread_info *)addr;
2773 bzero(&fe_bbr_info, sizeof(fe_bbr_info));
2775 bzero(&bbr_mtx, sizeof(bbr_mtx));
2776 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF);
2778 fe_bbr_info.bbr_info = bbr_info;
2779 fe_bbr_info.lock = &bbr_mtx;
2781 cv_init(&fe_bbr_info.sem, "BBR read cv");
2782 metatask = cfi_alloc_metatask(/*can_wait*/ 1);
2784 if (metatask == NULL) {
2785 mtx_destroy(&bbr_mtx);
2786 cv_destroy(&fe_bbr_info.sem);
2790 metatask->tasktype = CFI_TASK_BBRREAD;
2791 metatask->callback = ctl_ioctl_bbrread_callback;
2792 metatask->callback_arg = &fe_bbr_info;
2793 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num;
2794 metatask->taskinfo.bbrread.lba = bbr_info->lba;
2795 metatask->taskinfo.bbrread.len = bbr_info->len;
2797 cfi_action(metatask);
2800 while (fe_bbr_info.wakeup_done == 0)
2801 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx);
2802 mtx_unlock(&bbr_mtx);
2804 bbr_info->status = metatask->status;
2805 bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
2806 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status;
2807 memcpy(&bbr_info->sense_data,
2808 &metatask->taskinfo.bbrread.sense_data,
2809 MIN(sizeof(bbr_info->sense_data),
2810 sizeof(metatask->taskinfo.bbrread.sense_data)));
2812 cfi_free_metatask(metatask);
2814 mtx_destroy(&bbr_mtx);
2815 cv_destroy(&fe_bbr_info.sem);
2819 case CTL_DELAY_IO: {
2820 struct ctl_io_delay_info *delay_info;
2822 struct ctl_lun *lun;
2823 #endif /* CTL_IO_DELAY */
2825 delay_info = (struct ctl_io_delay_info *)addr;
2828 mtx_lock(&softc->ctl_lock);
2830 if ((delay_info->lun_id >= CTL_MAX_LUNS)
2831 || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
2832 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2834 lun = softc->ctl_luns[delay_info->lun_id];
2835 mtx_lock(&lun->lun_lock);
2837 delay_info->status = CTL_DELAY_STATUS_OK;
2839 switch (delay_info->delay_type) {
2840 case CTL_DELAY_TYPE_CONT:
2842 case CTL_DELAY_TYPE_ONESHOT:
2845 delay_info->status =
2846 CTL_DELAY_STATUS_INVALID_TYPE;
2850 switch (delay_info->delay_loc) {
2851 case CTL_DELAY_LOC_DATAMOVE:
2852 lun->delay_info.datamove_type =
2853 delay_info->delay_type;
2854 lun->delay_info.datamove_delay =
2855 delay_info->delay_secs;
2857 case CTL_DELAY_LOC_DONE:
2858 lun->delay_info.done_type =
2859 delay_info->delay_type;
2860 lun->delay_info.done_delay =
2861 delay_info->delay_secs;
2864 delay_info->status =
2865 CTL_DELAY_STATUS_INVALID_LOC;
2868 mtx_unlock(&lun->lun_lock);
2871 mtx_unlock(&softc->ctl_lock);
2873 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2874 #endif /* CTL_IO_DELAY */
2877 case CTL_REALSYNC_SET: {
2880 syncstate = (int *)addr;
2882 mtx_lock(&softc->ctl_lock);
2883 switch (*syncstate) {
2885 softc->flags &= ~CTL_FLAG_REAL_SYNC;
2888 softc->flags |= CTL_FLAG_REAL_SYNC;
2894 mtx_unlock(&softc->ctl_lock);
2897 case CTL_REALSYNC_GET: {
2900 syncstate = (int *)addr;
2902 mtx_lock(&softc->ctl_lock);
2903 if (softc->flags & CTL_FLAG_REAL_SYNC)
2907 mtx_unlock(&softc->ctl_lock);
2913 struct ctl_sync_info *sync_info;
2914 struct ctl_lun *lun;
2916 sync_info = (struct ctl_sync_info *)addr;
2918 mtx_lock(&softc->ctl_lock);
2919 lun = softc->ctl_luns[sync_info->lun_id];
2921 mtx_unlock(&softc->ctl_lock);
2922 sync_info->status = CTL_GS_SYNC_NO_LUN;
2925 * Get or set the sync interval. We're not bounds checking
2926 * in the set case; hopefully the user won't do something unreasonable.
2929 mtx_lock(&lun->lun_lock);
2930 mtx_unlock(&softc->ctl_lock);
2931 if (cmd == CTL_GETSYNC)
2932 sync_info->sync_interval = lun->sync_interval;
2934 lun->sync_interval = sync_info->sync_interval;
2935 mtx_unlock(&lun->lun_lock);
2937 sync_info->status = CTL_GS_SYNC_OK;
2941 case CTL_GETSTATS: {
2942 struct ctl_stats *stats;
2943 struct ctl_lun *lun;
2946 stats = (struct ctl_stats *)addr;
2948 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
2950 stats->status = CTL_SS_NEED_MORE_SPACE;
2951 stats->num_luns = softc->num_luns;
2955 * XXX KDM no locking here. If the LUN list changes,
2956 * things can blow up.
2958 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL;
2959 i++, lun = STAILQ_NEXT(lun, links)) {
2960 retval = copyout(&lun->stats, &stats->lun_stats[i],
2961 sizeof(lun->stats));
2965 stats->num_luns = softc->num_luns;
2966 stats->fill_len = sizeof(struct ctl_lun_io_stats) *
2968 stats->status = CTL_SS_OK;
2970 stats->flags = CTL_STATS_FLAG_TIME_VALID;
2972 stats->flags = CTL_STATS_FLAG_NONE;
2974 getnanouptime(&stats->timestamp);
2977 case CTL_ERROR_INJECT: {
2978 struct ctl_error_desc *err_desc, *new_err_desc;
2979 struct ctl_lun *lun;
2981 err_desc = (struct ctl_error_desc *)addr;
2983 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
2985 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
2987 mtx_lock(&softc->ctl_lock);
2988 lun = softc->ctl_luns[err_desc->lun_id];
2990 mtx_unlock(&softc->ctl_lock);
2991 free(new_err_desc, M_CTL);
2992 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
2993 __func__, (uintmax_t)err_desc->lun_id);
2997 mtx_lock(&lun->lun_lock);
2998 mtx_unlock(&softc->ctl_lock);
3001 * We could do some checking here to verify the validity
3002 * of the request, but given the complexity of error
3003 * injection requests, the checking logic would be fairly
3006 * For now, if the request is invalid, it just won't get
3007 * executed and might get deleted.
3009 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);
3012 * XXX KDM check to make sure the serial number is unique,
3013 * in case we somehow manage to wrap. That shouldn't
3014 * happen for a very long time, but it's the right thing to
3017 new_err_desc->serial = lun->error_serial;
3018 err_desc->serial = lun->error_serial;
3019 lun->error_serial++;
3021 mtx_unlock(&lun->lun_lock);
3024 case CTL_ERROR_INJECT_DELETE: {
3025 struct ctl_error_desc *delete_desc, *desc, *desc2;
3026 struct ctl_lun *lun;
3029 delete_desc = (struct ctl_error_desc *)addr;
3032 mtx_lock(&softc->ctl_lock);
3033 lun = softc->ctl_luns[delete_desc->lun_id];
3035 mtx_unlock(&softc->ctl_lock);
3036 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
3037 __func__, (uintmax_t)delete_desc->lun_id);
3041 mtx_lock(&lun->lun_lock);
3042 mtx_unlock(&softc->ctl_lock);
3043 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
3044 if (desc->serial != delete_desc->serial)
3047 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
3052 mtx_unlock(&lun->lun_lock);
3053 if (delete_done == 0) {
3054 printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
3055 "error serial %ju on LUN %u\n", __func__,
3056 delete_desc->serial, delete_desc->lun_id);
3062 case CTL_DUMP_STRUCTS: {
3064 struct ctl_port *port;
3065 struct ctl_frontend *fe;
3067 mtx_lock(&softc->ctl_lock);
3068 printf("CTL Persistent Reservation information start:\n");
3069 for (i = 0; i < CTL_MAX_LUNS; i++) {
3070 struct ctl_lun *lun;
3072 lun = softc->ctl_luns[i];
3075 || ((lun->flags & CTL_LUN_DISABLED) != 0))
3078 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
3079 if (lun->pr_keys[j] == NULL)
3081 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
3082 if (lun->pr_keys[j][k] == 0)
3084 printf(" LUN %d port %d iid %d key "
3086 (uintmax_t)lun->pr_keys[j][k]);
3090 printf("CTL Persistent Reservation information end\n");
3091 printf("CTL Ports:\n");
3092 STAILQ_FOREACH(port, &softc->port_list, links) {
3093 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
3094 "%#jx WWPN %#jx\n", port->targ_port, port->port_name,
3095 port->frontend->name, port->port_type,
3096 port->physical_port, port->virtual_port,
3097 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn);
3098 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
3099 if (port->wwpn_iid[j].in_use == 0 &&
3100 port->wwpn_iid[j].wwpn == 0 &&
3101 port->wwpn_iid[j].name == NULL)
3104 printf(" iid %u use %d WWPN %#jx '%s'\n",
3105 j, port->wwpn_iid[j].in_use,
3106 (uintmax_t)port->wwpn_iid[j].wwpn,
3107 port->wwpn_iid[j].name);
3110 printf("CTL Port information end\n");
3111 mtx_unlock(&softc->ctl_lock);
3113 * XXX KDM calling this without a lock. We'd likely want
3114 * to drop the lock before calling the frontend's dump
3117 printf("CTL Frontends:\n");
3118 STAILQ_FOREACH(fe, &softc->fe_list, links) {
3119 printf(" Frontend '%s'\n", fe->name);
3120 if (fe->fe_dump != NULL)
3123 printf("CTL Frontend information end\n");
3127 struct ctl_lun_req *lun_req;
3128 struct ctl_backend_driver *backend;
3130 lun_req = (struct ctl_lun_req *)addr;
3132 backend = ctl_backend_find(lun_req->backend);
3133 if (backend == NULL) {
3134 lun_req->status = CTL_LUN_ERROR;
3135 snprintf(lun_req->error_str,
3136 sizeof(lun_req->error_str),
3137 "Backend \"%s\" not found.",
3141 if (lun_req->num_be_args > 0) {
3142 lun_req->kern_be_args = ctl_copyin_args(
3143 lun_req->num_be_args,
3146 sizeof(lun_req->error_str));
3147 if (lun_req->kern_be_args == NULL) {
3148 lun_req->status = CTL_LUN_ERROR;
3153 retval = backend->ioctl(dev, cmd, addr, flag, td);
3155 if (lun_req->num_be_args > 0) {
3156 ctl_copyout_args(lun_req->num_be_args,
3157 lun_req->kern_be_args);
3158 ctl_free_args(lun_req->num_be_args,
3159 lun_req->kern_be_args);
3163 case CTL_LUN_LIST: {
3165 struct ctl_lun *lun;
3166 struct ctl_lun_list *list;
3167 struct ctl_option *opt;
3169 list = (struct ctl_lun_list *)addr;
3172 * Allocate a fixed length sbuf here, based on the length
3173 * of the user's buffer. We could allocate an auto-extending
3174 * buffer, and then tell the user how much larger our
3175 * amount of data is than his buffer, but that presents two problems:
3178 * 1. The sbuf(9) routines use a blocking malloc, and so
3179 * we can't hold a lock while calling them with an
3180 * auto-extending buffer.
3182 * 2. There is not currently a LUN reference counting
3183 * mechanism, outside of outstanding transactions on
3184 * the LUN's OOA queue. So a LUN could go away on us
3185 * while we're getting the LUN number, backend-specific
3186 * information, etc. Thus, given the way things
3187 * currently work, we need to hold the CTL lock while
3188 * grabbing LUN information.
3190 * So, from the user's standpoint, the best thing to do is
3191 * allocate what he thinks is a reasonable buffer length,
3192 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
3193 * double the buffer length and try again. (And repeat
3194 that until he succeeds.) A hypothetical userland sketch of that retry loop appears after this handler.
3196 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3198 list->status = CTL_LUN_LIST_ERROR;
3199 snprintf(list->error_str, sizeof(list->error_str),
3200 "Unable to allocate %d bytes for LUN list",
3205 sbuf_printf(sb, "<ctllunlist>\n");
3207 mtx_lock(&softc->ctl_lock);
3208 STAILQ_FOREACH(lun, &softc->lun_list, links) {
3209 mtx_lock(&lun->lun_lock);
3210 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
3211 (uintmax_t)lun->lun);
3214 * Bail out as soon as we see that we've overfilled the sbuf.
3220 retval = sbuf_printf(sb, "\t<backend_type>%s"
3221 "</backend_type>\n",
3222 (lun->backend == NULL) ? "none" :
3223 lun->backend->name);
3228 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
3229 lun->be_lun->lun_type);
3234 if (lun->backend == NULL) {
3235 retval = sbuf_printf(sb, "</lun>\n");
3241 retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
3242 (lun->be_lun->maxlba > 0) ?
3243 lun->be_lun->maxlba + 1 : 0);
3248 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
3249 lun->be_lun->blocksize);
3254 retval = sbuf_printf(sb, "\t<serial_number>");
3259 retval = ctl_sbuf_printf_esc(sb,
3260 lun->be_lun->serial_num,
3261 sizeof(lun->be_lun->serial_num));
3266 retval = sbuf_printf(sb, "</serial_number>\n");
3271 retval = sbuf_printf(sb, "\t<device_id>");
3276 retval = ctl_sbuf_printf_esc(sb,
3277 lun->be_lun->device_id,
3278 sizeof(lun->be_lun->device_id));
3283 retval = sbuf_printf(sb, "</device_id>\n");
3288 if (lun->backend->lun_info != NULL) {
3289 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb);
3293 STAILQ_FOREACH(opt, &lun->be_lun->options, links) {
3294 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
3295 opt->name, opt->value, opt->name);
3300 retval = sbuf_printf(sb, "</lun>\n");
3304 mtx_unlock(&lun->lun_lock);
3307 mtx_unlock(&lun->lun_lock);
3308 mtx_unlock(&softc->ctl_lock);
3311 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
3314 list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3315 snprintf(list->error_str, sizeof(list->error_str),
3316 "Out of space, %d bytes is too small",
3323 retval = copyout(sbuf_data(sb), list->lun_xml,
3326 list->fill_len = sbuf_len(sb) + 1;
3327 list->status = CTL_LUN_LIST_OK;
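/*
 * Hypothetical userland sketch of the doubling strategy described in
 * the comment at the top of this handler: start with a guessed
 * alloc_len and double it for as long as the handler answers
 * CTL_LUN_LIST_NEED_MORE_SPACE.
 *
 *	list.alloc_len = 4096;
 *	do {
 *		list.lun_xml = realloc(list.lun_xml, list.alloc_len);
 *		if (ioctl(fd, CTL_LUN_LIST, &list) == -1)
 *			break;
 *		if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE)
 *			list.alloc_len *= 2;
 *	} while (list.status == CTL_LUN_LIST_NEED_MORE_SPACE);
 */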
3332 struct ctl_iscsi *ci;
3333 struct ctl_frontend *fe;
3335 ci = (struct ctl_iscsi *)addr;
3337 fe = ctl_frontend_find("iscsi");
3339 ci->status = CTL_ISCSI_ERROR;
3340 snprintf(ci->error_str, sizeof(ci->error_str),
3341 "Frontend \"iscsi\" not found.");
3345 retval = fe->ioctl(dev, cmd, addr, flag, td);
3348 case CTL_PORT_REQ: {
3349 struct ctl_req *req;
3350 struct ctl_frontend *fe;
3352 req = (struct ctl_req *)addr;
3354 fe = ctl_frontend_find(req->driver);
3356 req->status = CTL_LUN_ERROR;
3357 snprintf(req->error_str, sizeof(req->error_str),
3358 "Frontend \"%s\" not found.", req->driver);
3361 if (req->num_args > 0) {
3362 req->kern_args = ctl_copyin_args(req->num_args,
3363 req->args, req->error_str, sizeof(req->error_str));
3364 if (req->kern_args == NULL) {
3365 req->status = CTL_LUN_ERROR;
3370 retval = fe->ioctl(dev, cmd, addr, flag, td);
3372 if (req->num_args > 0) {
3373 ctl_copyout_args(req->num_args, req->kern_args);
3374 ctl_free_args(req->num_args, req->kern_args);
3378 case CTL_PORT_LIST: {
3380 struct ctl_port *port;
3381 struct ctl_lun_list *list;
3382 struct ctl_option *opt;
3386 list = (struct ctl_lun_list *)addr;
3388 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
3390 list->status = CTL_LUN_LIST_ERROR;
3391 snprintf(list->error_str, sizeof(list->error_str),
3392 "Unable to allocate %d bytes for LUN list",
3397 sbuf_printf(sb, "<ctlportlist>\n");
3399 mtx_lock(&softc->ctl_lock);
3400 STAILQ_FOREACH(port, &softc->port_list, links) {
3401 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
3402 (uintmax_t)port->targ_port);
3405 * Bail out as soon as we see that we've overfilled the sbuf.
3411 retval = sbuf_printf(sb, "\t<frontend_type>%s"
3412 "</frontend_type>\n", port->frontend->name);
3416 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
3421 retval = sbuf_printf(sb, "\t<online>%s</online>\n",
3422 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
3426 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
3431 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
3432 port->physical_port);
3436 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
3437 port->virtual_port);
3441 if (port->target_devid != NULL) {
3442 sbuf_printf(sb, "\t<target>");
3443 ctl_id_sbuf(port->target_devid, sb);
3444 sbuf_printf(sb, "</target>\n");
3447 if (port->port_devid != NULL) {
3448 sbuf_printf(sb, "\t<port>");
3449 ctl_id_sbuf(port->port_devid, sb);
3450 sbuf_printf(sb, "</port>\n");
3453 if (port->port_info != NULL) {
3454 retval = port->port_info(port->onoff_arg, sb);
3458 STAILQ_FOREACH(opt, &port->options, links) {
3459 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
3460 opt->name, opt->value, opt->name);
3465 if (port->lun_map != NULL) {
3466 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
3467 for (j = 0; j < CTL_MAX_LUNS; j++) {
3468 plun = ctl_lun_map_from_port(port, j);
3469 if (plun >= CTL_MAX_LUNS)
3472 "\t<lun id=\"%u\">%u</lun>\n",
3477 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
3478 if (port->wwpn_iid[j].in_use == 0 ||
3479 (port->wwpn_iid[j].wwpn == 0 &&
3480 port->wwpn_iid[j].name == NULL))
3483 if (port->wwpn_iid[j].name != NULL)
3484 retval = sbuf_printf(sb,
3485 "\t<initiator id=\"%u\">%s</initiator>\n",
3486 j, port->wwpn_iid[j].name);
3488 retval = sbuf_printf(sb,
3489 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n",
3490 j, port->wwpn_iid[j].wwpn);
3497 retval = sbuf_printf(sb, "</targ_port>\n");
3501 mtx_unlock(&softc->ctl_lock);
3504 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
3507 list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
3508 snprintf(list->error_str, sizeof(list->error_str),
3509 "Out of space, %d bytes is too small",
3516 retval = copyout(sbuf_data(sb), list->lun_xml,
3519 list->fill_len = sbuf_len(sb) + 1;
3520 list->status = CTL_LUN_LIST_OK;
3525 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr;
3526 struct ctl_port *port;
3528 mtx_lock(&softc->ctl_lock);
3529 if (lm->port >= CTL_MAX_PORTS ||
3530 (port = softc->ctl_ports[lm->port]) == NULL) {
3531 mtx_unlock(&softc->ctl_lock);
3534 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps
3535 if (lm->plun < CTL_MAX_LUNS) {
3536 if (lm->lun == UINT32_MAX)
3537 retval = ctl_lun_map_unset(port, lm->plun);
3538 else if (lm->lun < CTL_MAX_LUNS &&
3539 softc->ctl_luns[lm->lun] != NULL)
3540 retval = ctl_lun_map_set(port, lm->plun, lm->lun);
3543 } else if (lm->plun == UINT32_MAX) {
3544 if (lm->lun == UINT32_MAX)
3545 retval = ctl_lun_map_deinit(port);
3547 retval = ctl_lun_map_init(port);
3553 /* XXX KDM should we fix this? */
3555 struct ctl_backend_driver *backend;
3562 * We encode the backend type as the ioctl type for backend
3563 * ioctls. So parse it out here, and then search for a
3564 * backend of this type.
3566 type = _IOC_TYPE(cmd);
3568 STAILQ_FOREACH(backend, &softc->be_list, links) {
3569 if (backend->type == type) {
3575 printf("ctl: unknown ioctl command %#lx or backend "
3580 retval = backend->ioctl(dev, cmd, addr, flag, td);
3590 ctl_get_initindex(struct ctl_nexus *nexus)
3592 if (nexus->targ_port < CTL_MAX_PORTS)
3593 return (nexus->initid.id +
3594 (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
3596 return (nexus->initid.id +
3597 ((nexus->targ_port - CTL_MAX_PORTS) *
3598 CTL_MAX_INIT_PER_PORT));
3602 ctl_get_resindex(struct ctl_nexus *nexus)
3604 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
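/*
 * Worked example (hypothetical nexus, assuming CTL_MAX_INIT_PER_PORT
 * is 2048): initiator ID 5 on target port 3 gets reservation index
 * 5 + 3 * 2048 = 6149.  ctl_get_initindex() computes the same value,
 * except that it folds ports at or above CTL_MAX_PORTS back into range.
 */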
3608 ctl_port_idx(int port_num)
3610 if (port_num < CTL_MAX_PORTS)
3613 return(port_num - CTL_MAX_PORTS);
3617 ctl_lun_map_init(struct ctl_port *port)
3619 struct ctl_softc *softc = control_softc;
3620 struct ctl_lun *lun;
3623 if (port->lun_map == NULL)
3624 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
3626 if (port->lun_map == NULL)
3628 for (i = 0; i < CTL_MAX_LUNS; i++)
3629 port->lun_map[i] = UINT32_MAX;
3630 if (port->status & CTL_PORT_STATUS_ONLINE) {
3631 STAILQ_FOREACH(lun, &softc->lun_list, links)
3632 port->lun_disable(port->targ_lun_arg, lun->lun);
3638 ctl_lun_map_deinit(struct ctl_port *port)
3640 struct ctl_softc *softc = control_softc;
3641 struct ctl_lun *lun;
3643 if (port->lun_map == NULL)
3645 free(port->lun_map, M_CTL);
3646 port->lun_map = NULL;
3647 if (port->status & CTL_PORT_STATUS_ONLINE) {
3648 STAILQ_FOREACH(lun, &softc->lun_list, links)
3649 port->lun_enable(port->targ_lun_arg, lun->lun);
3655 ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
3660 if (port->lun_map == NULL) {
3661 status = ctl_lun_map_init(port);
3665 old = port->lun_map[plun];
3666 port->lun_map[plun] = glun;
3667 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS)
3668 port->lun_enable(port->targ_lun_arg, plun);
3673 ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
3677 if (port->lun_map == NULL)
3679 old = port->lun_map[plun];
3680 port->lun_map[plun] = UINT32_MAX;
3681 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS)
3682 port->lun_disable(port->targ_lun_arg, plun);
3687 ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
3691 return (UINT32_MAX);
3692 if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS)
3694 return (port->lun_map[lun_id]);
3698 ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
3703 return (UINT32_MAX);
3704 if (port->lun_map == NULL)
3706 for (i = 0; i < CTL_MAX_LUNS; i++) {
3707 if (port->lun_map[i] == lun_id)
3710 return (UINT32_MAX);
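/*
 * Usage sketch for the map helpers above, mirroring what the
 * CTL_LUN_MAP ioctl does (LUN numbers hypothetical): expose global
 * LUN 7 as port-local LUN 0, translate an incoming LUN, then tear the
 * mapping down again.
 *
 *	ctl_lun_map_set(port, 0, 7);		// plun 0 -> glun 7
 *	glun = ctl_lun_map_from_port(port, 0);	// yields 7
 *	ctl_lun_map_unset(port, 0);		// back to UINT32_MAX
 */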
3713 static struct ctl_port *
3714 ctl_io_port(struct ctl_io_hdr *io_hdr)
3718 port_num = io_hdr->nexus.targ_port;
3719 return (control_softc->ctl_ports[ctl_port_idx(port_num)]);
3723 * Note: This only works for bitmask sizes that are at least 32 bits, and
3724 * that are a power of 2.
3727 ctl_ffz(uint32_t *mask, uint32_t size)
3729 uint32_t num_chunks, num_pieces;
3732 num_chunks = (size >> 5);
3733 if (num_chunks == 0)
3735 num_pieces = MIN((sizeof(uint32_t) * 8), size);
3737 for (i = 0; i < num_chunks; i++) {
3738 for (j = 0; j < num_pieces; j++) {
3739 if ((mask[i] & (1 << j)) == 0)
3740 return ((i << 5) + j);
3748 ctl_set_mask(uint32_t *mask, uint32_t bit)
3750 uint32_t chunk, piece;
3753 piece = bit % (sizeof(uint32_t) * 8);
3755 if ((mask[chunk] & (1 << piece)) != 0)
3758 mask[chunk] |= (1 << piece);
3764 ctl_clear_mask(uint32_t *mask, uint32_t bit)
3766 uint32_t chunk, piece;
3769 piece = bit % (sizeof(uint32_t) * 8);
3771 if ((mask[chunk] & (1 << piece)) == 0)
3774 mask[chunk] &= ~(1 << piece);
3780 ctl_is_set(uint32_t *mask, uint32_t bit)
3782 uint32_t chunk, piece;
3785 piece = bit % (sizeof(uint32_t) * 8);
3787 if ((mask[chunk] & (1 << piece)) == 0)
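/*
 * Minimal sketch of the mask helpers working together, as done with
 * ctl_lun_mask in ctl_alloc_lun() below: find the first clear bit,
 * claim it, test it, and release it.  (The mask size follows the
 * power-of-two note above.)
 *
 *	uint32_t mask[CTL_MAX_LUNS >> 5] = { 0 };
 *	int n = ctl_ffz(mask, CTL_MAX_LUNS);	// first zero bit, or -1
 *	ctl_set_mask(mask, n);
 *	if (ctl_is_set(mask, n))
 *		ctl_clear_mask(mask, n);
 */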
3794 ctl_get_prkey(struct ctl_lun *lun, uint32_t residx)
3798 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3801 return (t[residx % CTL_MAX_INIT_PER_PORT]);
3805 ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx)
3809 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3812 t[residx % CTL_MAX_INIT_PER_PORT] = 0;
3816 ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
3821 i = residx/CTL_MAX_INIT_PER_PORT;
3822 if (lun->pr_keys[i] != NULL)
3824 mtx_unlock(&lun->lun_lock);
3825 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
3827 mtx_lock(&lun->lun_lock);
3828 if (lun->pr_keys[i] == NULL)
3829 lun->pr_keys[i] = p;
3835 ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
3839 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
3840 KASSERT(t != NULL, ("prkey %d is not allocated", residx));
3841 t[residx % CTL_MAX_INIT_PER_PORT] = key;
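/*
 * Sketch of the reservation-key flow these helpers support (residx is
 * a hypothetical ctl_get_resindex() result).  Note that
 * ctl_alloc_prkey() drops and retakes the LUN lock around its malloc:
 *
 *	mtx_lock(&lun->lun_lock);
 *	ctl_alloc_prkey(lun, residx);
 *	ctl_set_prkey(lun, residx, key);
 *	if (ctl_get_prkey(lun, residx) == key)
 *		(initiator is registered)
 *	mtx_unlock(&lun->lun_lock);
 */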
3845 * ctl_softc, pool_name, total_ctl_io are passed in.
3846 * npool is passed out.
3849 ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
3850 uint32_t total_ctl_io, void **npool)
3853 struct ctl_io_pool *pool;
3855 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
3860 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
3861 pool->ctl_softc = ctl_softc;
3862 pool->zone = uma_zsecond_create(pool->name, NULL,
3863 NULL, NULL, NULL, ctl_softc->io_zone);
3864 /* uma_prealloc(pool->zone, total_ctl_io); */
3868 *npool = ctl_softc->io_zone;
3874 ctl_pool_free(struct ctl_io_pool *pool)
3881 uma_zdestroy(pool->zone);
3887 ctl_alloc_io(void *pool_ref)
3891 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3893 io = uma_zalloc(pool->zone, M_WAITOK);
3895 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK);
3898 io->io_hdr.pool = pool_ref;
3903 ctl_alloc_io_nowait(void *pool_ref)
3907 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3909 io = uma_zalloc(pool->zone, M_NOWAIT);
3911 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
3914 io->io_hdr.pool = pool_ref;
3919 ctl_free_io(union ctl_io *io)
3922 struct ctl_io_pool *pool;
3929 pool = (struct ctl_io_pool *)io->io_hdr.pool;
3930 uma_zfree(pool->zone, io);
3932 uma_zfree((uma_zone_t)io->io_hdr.pool, io);
3937 ctl_zero_io(union ctl_io *io)
3945 * May need to preserve linked list pointers at some point too.
3947 pool_ref = io->io_hdr.pool;
3948 memset(io, 0, sizeof(*io));
3949 io->io_hdr.pool = pool_ref;
3953 * This routine is currently used for internal copies of ctl_ios that need
3954 * to persist for some reason after we've already returned status to the
3955 * FETD. (Thus the flag set.)
3958 * Note that this makes a blind copy of all fields in the ctl_io, except
3959 * for the pool reference. This includes any memory that has been
3960 * allocated! That memory will no longer be valid after done has been
3961 * called, so this would be VERY DANGEROUS for a command that actually does
3962 * any reads or writes. Right now (11/7/2005), this is only used for immediate
3963 * start and stop commands, which don't transfer any data, so this is not a
3964 * problem. If it is used for anything else, the caller would also need to
3965 * allocate data buffer space and this routine would need to be modified to
3966 * copy the data buffer(s) as well.
3969 ctl_copy_io(union ctl_io *src, union ctl_io *dest)
3978 * May need to preserve linked list pointers at some point too.
3980 pool_ref = dest->io_hdr.pool;
3982 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest)));
3984 dest->io_hdr.pool = pool_ref;
3986 * We need to know that this is an internal copy, and doesn't need
3987 * to get passed back to the FETD that allocated it.
3989 dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
3993 ctl_expand_number(const char *buf, uint64_t *num)
3999 number = strtoq(buf, &endptr, 0);
4001 switch (tolower((unsigned char)*endptr)) {
4021 case '\0': /* No unit. */
4025 /* Unrecognized unit. */
4029 if ((number << shift) >> shift != number) {
4033 *num = number << shift;
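/*
 * Examples of the unit expansion above (assuming the elided suffix
 * cases map k/m/g/... to powers of 1024): "512" yields 512, "4k"
 * yields 4096, "1M" yields 1048576, and a value whose shifted result
 * no longer round-trips is rejected as an overflow.
 *
 *	uint64_t v;
 *	if (ctl_expand_number("4k", &v) == 0)
 *		printf("%ju\n", (uintmax_t)v);	// prints 4096
 */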
4039 * This routine could be used in the future to load default and/or saved
4040 * mode page parameters for a particular LUN.
4043 ctl_init_page_index(struct ctl_lun *lun)
4046 struct ctl_page_index *page_index;
4050 memcpy(&lun->mode_pages.index, page_index_template,
4051 sizeof(page_index_template));
4053 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
4055 page_index = &lun->mode_pages.index[i];
4057 * If this is a disk-only mode page, there's no point in
4058 * setting it up. For some pages, we have to have some
4059 * basic information about the disk in order to calculate the
4062 if ((lun->be_lun->lun_type != T_DIRECT)
4063 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
4066 switch (page_index->page_code & SMPH_PC_MASK) {
4067 case SMS_RW_ERROR_RECOVERY_PAGE: {
4068 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4069 panic("subpage is incorrect!");
4070 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
4071 &rw_er_page_default,
4072 sizeof(rw_er_page_default));
4073 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
4074 &rw_er_page_changeable,
4075 sizeof(rw_er_page_changeable));
4076 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
4077 &rw_er_page_default,
4078 sizeof(rw_er_page_default));
4079 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
4080 &rw_er_page_default,
4081 sizeof(rw_er_page_default));
4082 page_index->page_data =
4083 (uint8_t *)lun->mode_pages.rw_er_page;
4086 case SMS_FORMAT_DEVICE_PAGE: {
4087 struct scsi_format_page *format_page;
4089 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4090 panic("subpage is incorrect!");
4093 * Sectors per track are set above. Bytes per
4094 * sector need to be set here on a per-LUN basis.
4096 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
4097 &format_page_default,
4098 sizeof(format_page_default));
4099 memcpy(&lun->mode_pages.format_page[
4100 CTL_PAGE_CHANGEABLE], &format_page_changeable,
4101 sizeof(format_page_changeable));
4102 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
4103 &format_page_default,
4104 sizeof(format_page_default));
4105 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
4106 &format_page_default,
4107 sizeof(format_page_default));
4109 format_page = &lun->mode_pages.format_page[
4111 scsi_ulto2b(lun->be_lun->blocksize,
4112 format_page->bytes_per_sector);
4114 format_page = &lun->mode_pages.format_page[
4116 scsi_ulto2b(lun->be_lun->blocksize,
4117 format_page->bytes_per_sector);
4119 format_page = &lun->mode_pages.format_page[
4121 scsi_ulto2b(lun->be_lun->blocksize,
4122 format_page->bytes_per_sector);
4124 page_index->page_data =
4125 (uint8_t *)lun->mode_pages.format_page;
4128 case SMS_RIGID_DISK_PAGE: {
4129 struct scsi_rigid_disk_page *rigid_disk_page;
4130 uint32_t sectors_per_cylinder;
4134 #endif /* !__XSCALE__ */
4136 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4137 panic("invalid subpage value %d",
4138 page_index->subpage);
4141 * Rotation rate and sectors per track are set
4142 * above. We calculate the cylinders here based on
4143 * capacity. Due to the number of heads and
4144 * sectors per track we're using, smaller arrays
4145 * may turn out to have 0 cylinders. Linux and
4146 * FreeBSD don't pay attention to these mode pages
4147 * to figure out capacity, but Solaris does. It
4148 * seems to deal with 0 cylinders just fine, and
4149 * works out a fake geometry based on the capacity.
4151 memcpy(&lun->mode_pages.rigid_disk_page[
4152 CTL_PAGE_DEFAULT], &rigid_disk_page_default,
4153 sizeof(rigid_disk_page_default));
4154 memcpy(&lun->mode_pages.rigid_disk_page[
4155 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
4156 sizeof(rigid_disk_page_changeable));
4158 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
4162 * The divide method here will be more accurate,
4163 * probably, but results in floating point being
4164 * used in the kernel on i386 (__udivdi3()). On the
4165 * XScale, though, __udivdi3() is implemented in
4168 * The shift method for cylinder calculation is
4169 * accurate if sectors_per_cylinder is a power of
4170 * 2. Otherwise it might be slightly off -- you
4171 * might have a bit of a truncation problem.
4174 cylinders = (lun->be_lun->maxlba + 1) /
4175 sectors_per_cylinder;
4177 for (shift = 31; shift > 0; shift--) {
4178 if (sectors_per_cylinder & (1 << shift))
4181 cylinders = (lun->be_lun->maxlba + 1) >> shift;
4185 * We've basically got 3 bytes, or 24 bits for the
4186 * cylinder size in the mode page. If we're over,
4187 * just clamp it at 2^24 - 1.
4189 if (cylinders > 0xffffff)
4190 cylinders = 0xffffff;
4192 rigid_disk_page = &lun->mode_pages.rigid_disk_page[
4194 scsi_ulto3b(cylinders, rigid_disk_page->cylinders);
4196 if ((value = ctl_get_opt(&lun->be_lun->options,
4198 scsi_ulto2b(strtol(value, NULL, 0),
4199 rigid_disk_page->rotation_rate);
4202 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
4203 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
4204 sizeof(rigid_disk_page_default));
4205 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
4206 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
4207 sizeof(rigid_disk_page_default));
4209 page_index->page_data =
4210 (uint8_t *)lun->mode_pages.rigid_disk_page;
4213 case SMS_CACHING_PAGE: {
4214 struct scsi_caching_page *caching_page;
4216 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4217 panic("invalid subpage value %d",
4218 page_index->subpage);
4219 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
4220 &caching_page_default,
4221 sizeof(caching_page_default));
4222 memcpy(&lun->mode_pages.caching_page[
4223 CTL_PAGE_CHANGEABLE], &caching_page_changeable,
4224 sizeof(caching_page_changeable));
4225 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4226 &caching_page_default,
4227 sizeof(caching_page_default));
4228 caching_page = &lun->mode_pages.caching_page[
4230 value = ctl_get_opt(&lun->be_lun->options, "writecache");
4231 if (value != NULL && strcmp(value, "off") == 0)
4232 caching_page->flags1 &= ~SCP_WCE;
4233 value = ctl_get_opt(&lun->be_lun->options, "readcache");
4234 if (value != NULL && strcmp(value, "off") == 0)
4235 caching_page->flags1 |= SCP_RCD;
4236 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
4237 &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
4238 sizeof(caching_page_default));
4239 page_index->page_data =
4240 (uint8_t *)lun->mode_pages.caching_page;
4243 case SMS_CONTROL_MODE_PAGE: {
4244 struct scsi_control_page *control_page;
4246 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
4247 panic("invalid subpage value %d",
4248 page_index->subpage);
4250 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
4251 &control_page_default,
4252 sizeof(control_page_default));
4253 memcpy(&lun->mode_pages.control_page[
4254 CTL_PAGE_CHANGEABLE], &control_page_changeable,
4255 sizeof(control_page_changeable));
4256 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
4257 &control_page_default,
4258 sizeof(control_page_default));
4259 control_page = &lun->mode_pages.control_page[
4261 value = ctl_get_opt(&lun->be_lun->options, "reordering");
4262 if (value != NULL && strcmp(value, "unrestricted") == 0) {
4263 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK;
4264 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED;
4266 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
4267 &lun->mode_pages.control_page[CTL_PAGE_SAVED],
4268 sizeof(control_page_default));
4269 page_index->page_data =
4270 (uint8_t *)lun->mode_pages.control_page;
4274 case SMS_INFO_EXCEPTIONS_PAGE: {
4275 switch (page_index->subpage) {
4276 case SMS_SUBPAGE_PAGE_0:
4277 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
4279 sizeof(ie_page_default));
4280 memcpy(&lun->mode_pages.ie_page[
4281 CTL_PAGE_CHANGEABLE], &ie_page_changeable,
4282 sizeof(ie_page_changeable));
4283 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
4285 sizeof(ie_page_default));
4286 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
4288 sizeof(ie_page_default));
4289 page_index->page_data =
4290 (uint8_t *)lun->mode_pages.ie_page;
4293 struct ctl_logical_block_provisioning_page *page;
4295 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
4297 sizeof(lbp_page_default));
4298 memcpy(&lun->mode_pages.lbp_page[
4299 CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
4300 sizeof(lbp_page_changeable));
4301 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
4303 sizeof(lbp_page_default));
4304 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
4305 value = ctl_get_opt(&lun->be_lun->options,
4307 if (value != NULL &&
4308 ctl_expand_number(value, &ival) == 0) {
4309 page->descr[0].flags |= SLBPPD_ENABLED |
4311 if (lun->be_lun->blocksize)
4312 ival /= lun->be_lun->blocksize;
4315 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4316 page->descr[0].count);
4318 value = ctl_get_opt(&lun->be_lun->options,
4320 if (value != NULL &&
4321 ctl_expand_number(value, &ival) == 0) {
4322 page->descr[1].flags |= SLBPPD_ENABLED |
4324 if (lun->be_lun->blocksize)
4325 ival /= lun->be_lun->blocksize;
4328 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4329 page->descr[1].count);
4331 value = ctl_get_opt(&lun->be_lun->options,
4332 "pool-avail-threshold");
4333 if (value != NULL &&
4334 ctl_expand_number(value, &ival) == 0) {
4335 page->descr[2].flags |= SLBPPD_ENABLED |
4337 if (lun->be_lun->blocksize)
4338 ival /= lun->be_lun->blocksize;
4341 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4342 page->descr[2].count);
4344 value = ctl_get_opt(&lun->be_lun->options,
4345 "pool-used-threshold");
4346 if (value != NULL &&
4347 ctl_expand_number(value, &ival) == 0) {
4348 page->descr[3].flags |= SLBPPD_ENABLED |
4350 if (lun->be_lun->blocksize)
4351 ival /= lun->be_lun->blocksize;
4354 scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
4355 page->descr[3].count);
4357 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
4358 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
4359 sizeof(lbp_page_default));
4360 page_index->page_data =
4361 (uint8_t *)lun->mode_pages.lbp_page;
4365 case SMS_VENDOR_SPECIFIC_PAGE:{
4366 switch (page_index->subpage) {
4367 case DBGCNF_SUBPAGE_CODE: {
4368 struct copan_debugconf_subpage *current_page,
4371 memcpy(&lun->mode_pages.debugconf_subpage[
4373 &debugconf_page_default,
4374 sizeof(debugconf_page_default));
4375 memcpy(&lun->mode_pages.debugconf_subpage[
4376 CTL_PAGE_CHANGEABLE],
4377 &debugconf_page_changeable,
4378 sizeof(debugconf_page_changeable));
4379 memcpy(&lun->mode_pages.debugconf_subpage[
4381 &debugconf_page_default,
4382 sizeof(debugconf_page_default));
4383 memcpy(&lun->mode_pages.debugconf_subpage[
4385 &debugconf_page_default,
4386 sizeof(debugconf_page_default));
4387 page_index->page_data =
4388 (uint8_t *)lun->mode_pages.debugconf_subpage;
4390 current_page = (struct copan_debugconf_subpage *)
4391 (page_index->page_data +
4392 (page_index->page_len *
4394 saved_page = (struct copan_debugconf_subpage *)
4395 (page_index->page_data +
4396 (page_index->page_len *
4401 panic("invalid subpage value %d",
4402 page_index->subpage);
4408 panic("invalid page value %d",
4409 page_index->page_code & SMPH_PC_MASK);
4414 return (CTL_RETVAL_COMPLETE);
4418 ctl_init_log_page_index(struct ctl_lun *lun)
4420 struct ctl_page_index *page_index;
4423 memcpy(&lun->log_pages.index, log_page_index_template,
4424 sizeof(log_page_index_template));
4427 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {
4429 page_index = &lun->log_pages.index[i];
4431 * If this is a disk-only log page, there's no point in
4432 * setting it up. For some pages, we have to have some
4433 * basic information about the disk in order to calculate the
4436 if ((lun->be_lun->lun_type != T_DIRECT)
4437 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
4440 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
4441 lun->backend->lun_attr == NULL)
4444 if (page_index->page_code != prev) {
4445 lun->log_pages.pages_page[j] = page_index->page_code;
4446 prev = page_index->page_code;
4449 lun->log_pages.subpages_page[k*2] = page_index->page_code;
4450 lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
4453 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
4454 lun->log_pages.index[0].page_len = j;
4455 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
4456 lun->log_pages.index[1].page_len = k * 2;
4457 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
4458 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
4459 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
4460 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);
4462 return (CTL_RETVAL_COMPLETE);
4466 hex2bin(const char *str, uint8_t *buf, int buf_size)
4471 memset(buf, 0, buf_size);
4472 while (isspace(str[0]))
4474 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
4477 for (i = 0; str[i] != 0 && i < buf_size; i++) {
4481 else if (isalpha(c))
4482 c -= isupper(c) ? 'A' - 10 : 'a' - 10;
4488 buf[i / 2] |= (c << 4);
4492 return ((i + 1) / 2);
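/*
 * Example of the conversion above (input hypothetical): leading
 * whitespace and an optional "0x" are skipped, nibbles are packed
 * high-first, and the return value is the number of bytes produced.
 *
 *	uint8_t id[8];
 *	int n = hex2bin("0x50000f0f", id, sizeof(id));
 *	// n == 4; id[0..3] == 0x50 0x00 0x0f 0x0f
 */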
4499 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
4500 * wants us to allocate the LUN and he can block.
4501 * - ctl_softc is always set
4502 * - be_lun is set if the LUN has a backend (needed for disk LUNs)
4504 * Returns 0 for success, non-zero (errno) for failure.
4507 ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
4508 struct ctl_be_lun *const be_lun)
4510 struct ctl_lun *nlun, *lun;
4511 struct scsi_vpd_id_descriptor *desc;
4512 struct scsi_vpd_id_t10 *t10id;
4513 const char *eui, *naa, *scsiname, *vendor, *value;
4514 int lun_number, i, lun_malloced;
4515 int devidlen, idlen1, idlen2 = 0, len;
4521 * We currently only support Direct Access or Processor LUN types.
4523 switch (be_lun->lun_type) {
4531 be_lun->lun_config_status(be_lun->be_lun,
4532 CTL_LUN_CONFIG_FAILURE);
4535 if (ctl_lun == NULL) {
4536 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
4543 memset(lun, 0, sizeof(*lun));
4545 lun->flags = CTL_LUN_MALLOCED;
4547 /* Generate LUN ID. */
4548 devidlen = max(CTL_DEVID_MIN_LEN,
4549 strnlen(be_lun->device_id, CTL_DEVID_LEN));
4550 idlen1 = sizeof(*t10id) + devidlen;
4551 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
4552 scsiname = ctl_get_opt(&be_lun->options, "scsiname");
4553 if (scsiname != NULL) {
4554 idlen2 = roundup2(strlen(scsiname) + 1, 4);
4555 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
4557 eui = ctl_get_opt(&be_lun->options, "eui");
4559 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4561 naa = ctl_get_opt(&be_lun->options, "naa");
4563 len += sizeof(struct scsi_vpd_id_descriptor) + 16;
4565 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
4566 M_CTL, M_WAITOK | M_ZERO);
4567 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
4568 desc->proto_codeset = SVPD_ID_CODESET_ASCII;
4569 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
4570 desc->length = idlen1;
4571 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
4572 memset(t10id->vendor, ' ', sizeof(t10id->vendor));
4573 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) {
4574 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
4576 strncpy(t10id->vendor, vendor,
4577 min(sizeof(t10id->vendor), strlen(vendor)));
4579 strncpy((char *)t10id->vendor_spec_id,
4580 (char *)be_lun->device_id, devidlen);
4581 if (scsiname != NULL) {
4582 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4584 desc->proto_codeset = SVPD_ID_CODESET_UTF8;
4585 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4586 SVPD_ID_TYPE_SCSI_NAME;
4587 desc->length = idlen2;
4588 strlcpy(desc->identifier, scsiname, idlen2);
4591 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4593 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4594 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4596 desc->length = hex2bin(eui, desc->identifier, 16);
4597 desc->length = desc->length > 12 ? 16 :
4598 (desc->length > 8 ? 12 : 8);
4599 len -= 16 - desc->length;
4602 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
4604 desc->proto_codeset = SVPD_ID_CODESET_BINARY;
4605 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
4607 desc->length = hex2bin(naa, desc->identifier, 16);
4608 desc->length = desc->length > 8 ? 16 : 8;
4609 len -= 16 - desc->length;
4611 lun->lun_devid->len = len;
4613 mtx_lock(&ctl_softc->ctl_lock);
4615 * See if the caller requested a particular LUN number. If so, see
4616 * if it is available. Otherwise, allocate the first available LUN.
4618 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
4619 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
4620 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
4621 mtx_unlock(&ctl_softc->ctl_lock);
4622 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
4623 printf("ctl: requested LUN ID %d is higher "
4624 "than CTL_MAX_LUNS - 1 (%d)\n",
4625 be_lun->req_lun_id, CTL_MAX_LUNS - 1);
4628 * XXX KDM return an error, or just assign
4629 * another LUN ID in this case??
4631 printf("ctl: requested LUN ID %d is already "
4632 "in use\n", be_lun->req_lun_id);
4634 if (lun->flags & CTL_LUN_MALLOCED)
4636 be_lun->lun_config_status(be_lun->be_lun,
4637 CTL_LUN_CONFIG_FAILURE);
4640 lun_number = be_lun->req_lun_id;
4642 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS);
4643 if (lun_number == -1) {
4644 mtx_unlock(&ctl_softc->ctl_lock);
4645 printf("ctl: can't allocate LUN, out of LUNs\n");
4646 if (lun->flags & CTL_LUN_MALLOCED)
4648 be_lun->lun_config_status(be_lun->be_lun,
4649 CTL_LUN_CONFIG_FAILURE);
4653 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);
4655 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
4656 lun->lun = lun_number;
4657 lun->be_lun = be_lun;
4659 * The processor LUN is always enabled. Disk LUNs come on line
4660 * disabled, and must be enabled by the backend.
4662 lun->flags |= CTL_LUN_DISABLED;
4663 lun->backend = be_lun->be;
4664 be_lun->ctl_lun = lun;
4665 be_lun->lun_id = lun_number;
4666 atomic_add_int(&be_lun->be->num_luns, 1);
4667 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE)
4668 lun->flags |= CTL_LUN_OFFLINE;
4670 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF)
4671 lun->flags |= CTL_LUN_STOPPED;
4673 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE)
4674 lun->flags |= CTL_LUN_INOPERABLE;
4676 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
4677 lun->flags |= CTL_LUN_PRIMARY_SC;
4679 value = ctl_get_opt(&be_lun->options, "readonly");
4680 if (value != NULL && strcmp(value, "on") == 0)
4681 lun->flags |= CTL_LUN_READONLY;
4683 lun->serseq = CTL_LUN_SERSEQ_OFF;
4684 if (be_lun->flags & CTL_LUN_FLAG_SERSEQ_READ)
4685 lun->serseq = CTL_LUN_SERSEQ_READ;
4686 value = ctl_get_opt(&be_lun->options, "serseq");
4687 if (value != NULL && strcmp(value, "on") == 0)
4688 lun->serseq = CTL_LUN_SERSEQ_ON;
4689 else if (value != NULL && strcmp(value, "read") == 0)
4690 lun->serseq = CTL_LUN_SERSEQ_READ;
4691 else if (value != NULL && strcmp(value, "off") == 0)
4692 lun->serseq = CTL_LUN_SERSEQ_OFF;
4694 lun->ctl_softc = ctl_softc;
4696 lun->last_busy = getsbinuptime();
4698 TAILQ_INIT(&lun->ooa_queue);
4699 TAILQ_INIT(&lun->blocked_queue);
4700 STAILQ_INIT(&lun->error_list);
4701 ctl_tpc_lun_init(lun);
4704 * Initialize the mode and log page index.
4706 ctl_init_page_index(lun);
4707 ctl_init_log_page_index(lun);
4710 * Now, before we insert this lun on the lun list, set the lun
4711 * inventory changed UA for all other luns.
4713 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
4714 mtx_lock(&nlun->lun_lock);
4715 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4716 mtx_unlock(&nlun->lun_lock);
4719 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);
4721 ctl_softc->ctl_luns[lun_number] = lun;
4723 ctl_softc->num_luns++;
4725 /* Setup statistics gathering */
4726 lun->stats.device_type = be_lun->lun_type;
4727 lun->stats.lun_number = lun_number;
4728 if (lun->stats.device_type == T_DIRECT)
4729 lun->stats.blocksize = be_lun->blocksize;
4731 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
4732 for (i = 0; i < CTL_MAX_PORTS; i++)
4733 lun->stats.ports[i].targ_port = i;
4735 mtx_unlock(&ctl_softc->ctl_lock);
4737 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
4744 * - LUN has already been marked invalid and any pending I/O has been taken
4748 ctl_free_lun(struct ctl_lun *lun)
4750 struct ctl_softc *softc;
4751 struct ctl_lun *nlun;
4754 softc = lun->ctl_softc;
4756 mtx_assert(&softc->ctl_lock, MA_OWNED);
4758 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
4760 ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
4762 softc->ctl_luns[lun->lun] = NULL;
4764 if (!TAILQ_EMPTY(&lun->ooa_queue))
4765 panic("Freeing a LUN %p with outstanding I/O!!\n", lun);
4770 * Tell the backend to free resources, if this LUN has a backend.
4772 atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
4773 lun->be_lun->lun_shutdown(lun->be_lun->be_lun);
4775 ctl_tpc_lun_shutdown(lun);
4776 mtx_destroy(&lun->lun_lock);
4777 free(lun->lun_devid, M_CTL);
4778 for (i = 0; i < CTL_MAX_PORTS; i++)
4779 free(lun->pending_ua[i], M_CTL);
4780 for (i = 0; i < 2 * CTL_MAX_PORTS; i++)
4781 free(lun->pr_keys[i], M_CTL);
4782 free(lun->write_buffer, M_CTL);
4783 if (lun->flags & CTL_LUN_MALLOCED)
4784 free(lun, M_CTL);
4786 STAILQ_FOREACH(nlun, &softc->lun_list, links) {
4787 mtx_lock(&nlun->lun_lock);
4788 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
4789 mtx_unlock(&nlun->lun_lock);
4796 ctl_create_lun(struct ctl_be_lun *be_lun)
4798 struct ctl_softc *softc;
4800 softc = control_softc;
4803 * ctl_alloc_lun() should handle all potential failure cases.
4805 ctl_alloc_lun(softc, NULL, be_lun);
4809 ctl_add_lun(struct ctl_be_lun *be_lun)
4811 struct ctl_softc *softc = control_softc;
4813 mtx_lock(&softc->ctl_lock);
4814 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links);
4815 mtx_unlock(&softc->ctl_lock);
4816 wakeup(&softc->pending_lun_queue);
4822 ctl_enable_lun(struct ctl_be_lun *be_lun)
4824 struct ctl_softc *softc;
4825 struct ctl_port *port, *nport;
4826 struct ctl_lun *lun;
4829 lun = (struct ctl_lun *)be_lun->ctl_lun;
4830 softc = lun->ctl_softc;
4832 mtx_lock(&softc->ctl_lock);
4833 mtx_lock(&lun->lun_lock);
4834 if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4836 * eh? Why did we get called if the LUN is already enabled?
4839 mtx_unlock(&lun->lun_lock);
4840 mtx_unlock(&softc->ctl_lock);
4843 lun->flags &= ~CTL_LUN_DISABLED;
4844 mtx_unlock(&lun->lun_lock);
4846 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) {
4847 nport = STAILQ_NEXT(port, links);
4848 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4849 port->lun_map != NULL)
4853 * Drop the lock while we call the FETD's enable routine.
4854 * This can lead to a callback into CTL (at least in the
4855 * case of the internal initiator frontend).
4857 mtx_unlock(&softc->ctl_lock);
4858 retval = port->lun_enable(port->targ_lun_arg, lun->lun);
4859 mtx_lock(&softc->ctl_lock);
4861 printf("%s: FETD %s port %d returned error "
4862 "%d for lun_enable on lun %jd\n",
4863 __func__, port->port_name, port->targ_port,
4864 retval, (intmax_t)lun->lun);
4868 mtx_unlock(&softc->ctl_lock);
4874 ctl_disable_lun(struct ctl_be_lun *be_lun)
4876 struct ctl_softc *softc;
4877 struct ctl_port *port;
4878 struct ctl_lun *lun;
4881 lun = (struct ctl_lun *)be_lun->ctl_lun;
4882 softc = lun->ctl_softc;
4884 mtx_lock(&softc->ctl_lock);
4885 mtx_lock(&lun->lun_lock);
4886 if (lun->flags & CTL_LUN_DISABLED) {
4887 mtx_unlock(&lun->lun_lock);
4888 mtx_unlock(&softc->ctl_lock);
4891 lun->flags |= CTL_LUN_DISABLED;
4892 mtx_unlock(&lun->lun_lock);
4894 STAILQ_FOREACH(port, &softc->port_list, links) {
4895 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
4896 port->lun_map != NULL)
4898 mtx_unlock(&softc->ctl_lock);
4900 * Drop the lock before we call the frontend's disable
4901 * routine, to avoid lock order reversals.
4903 * XXX KDM what happens if the frontend list changes while
4904 * we're traversing it? It's unlikely, but should be handled.
4906 retval = port->lun_disable(port->targ_lun_arg, lun->lun);
4907 mtx_lock(&softc->ctl_lock);
4909 printf("%s: FETD %s port %d returned error "
4910 "%d for lun_disable on lun %jd\n",
4911 __func__, port->port_name, port->targ_port,
4912 retval, (intmax_t)lun->lun);
4916 mtx_unlock(&softc->ctl_lock);
4922 ctl_start_lun(struct ctl_be_lun *be_lun)
4924 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4926 mtx_lock(&lun->lun_lock);
4927 lun->flags &= ~CTL_LUN_STOPPED;
4928 mtx_unlock(&lun->lun_lock);
4933 ctl_stop_lun(struct ctl_be_lun *be_lun)
4935 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4937 mtx_lock(&lun->lun_lock);
4938 lun->flags |= CTL_LUN_STOPPED;
4939 mtx_unlock(&lun->lun_lock);
4944 ctl_lun_offline(struct ctl_be_lun *be_lun)
4946 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4948 mtx_lock(&lun->lun_lock);
4949 lun->flags |= CTL_LUN_OFFLINE;
4950 mtx_unlock(&lun->lun_lock);
4955 ctl_lun_online(struct ctl_be_lun *be_lun)
4957 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
4959 mtx_lock(&lun->lun_lock);
4960 lun->flags &= ~CTL_LUN_OFFLINE;
4961 mtx_unlock(&lun->lun_lock);
4966 ctl_invalidate_lun(struct ctl_be_lun *be_lun)
4968 struct ctl_softc *softc;
4969 struct ctl_lun *lun;
4971 lun = (struct ctl_lun *)be_lun->ctl_lun;
4972 softc = lun->ctl_softc;
4974 mtx_lock(&lun->lun_lock);
4977 * The LUN needs to be disabled before it can be marked invalid.
4979 if ((lun->flags & CTL_LUN_DISABLED) == 0) {
4980 mtx_unlock(&lun->lun_lock);
4984 * Mark the LUN invalid.
4986 lun->flags |= CTL_LUN_INVALID;
4989 * If there is nothing in the OOA queue, go ahead and free the LUN.
4990 * If we have something in the OOA queue, we'll free it when the
4991 * last I/O completes.
4993 if (TAILQ_EMPTY(&lun->ooa_queue)) {
4994 mtx_unlock(&lun->lun_lock);
4995 mtx_lock(&softc->ctl_lock);
4997 mtx_unlock(&softc->ctl_lock);
4999 mtx_unlock(&lun->lun_lock);
5005 ctl_lun_inoperable(struct ctl_be_lun *be_lun)
5007 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
5009 mtx_lock(&lun->lun_lock);
5010 lun->flags |= CTL_LUN_INOPERABLE;
5011 mtx_unlock(&lun->lun_lock);
5016 ctl_lun_operable(struct ctl_be_lun *be_lun)
5018 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
5020 mtx_lock(&lun->lun_lock);
5021 lun->flags &= ~CTL_LUN_INOPERABLE;
5022 mtx_unlock(&lun->lun_lock);
5027 ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
5029 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
5031 mtx_lock(&lun->lun_lock);
5032 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED);
5033 mtx_unlock(&lun->lun_lock);
5037 * Backend "memory move is complete" callback for requests that never
5038 * make it down to, say, RAIDCore's configuration code.
5041 ctl_config_move_done(union ctl_io *io)
5045 CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
5046 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
5047 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type));
5049 if ((io->io_hdr.port_status != 0) &&
5050 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5051 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5053 * For hardware error sense keys, the sense key
5054 * specific value is defined to be a retry count,
5055 * but we use it to pass back an internal FETD
5056 * error code. XXX KDM Hopefully the FETD is only
5057 * using 16 bits for an error code, since that's
5058 * all the space we have in the sks field.
5060 ctl_set_internal_failure(&io->scsiio,
5061 /*sks_valid*/ 1,
5062 /*retry_count*/
5063 io->io_hdr.port_status);
5066 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
5067 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
5068 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
5069 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
5071 * XXX KDM just assuming a single pointer here, and not a
5072 * S/G list. If we start using S/G lists for config data,
5073 * we'll need to know how to clean them up here as well.
5075 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5076 free(io->scsiio.kern_data_ptr, M_CTL);
5078 retval = CTL_RETVAL_COMPLETE;
5081 * XXX KDM now we need to continue data movement. Some
5083 * - call ctl_scsiio() again? We don't do this for data
5084 * writes, because for those at least we know ahead of
5085 * time where the write will go and how long it is. For
5086 * config writes, though, that information is largely
5087 * contained within the write itself, thus we need to
5088 * parse out the data again.
5090 * - Call some other function once the data is in?
5092 if (ctl_debug & CTL_DEBUG_CDB_DATA)
5093 ctl_data_print(io);
5096 * XXX KDM call ctl_scsiio() again for now, and check flag
5097 * bits to see whether we're allocated or not.
5099 retval = ctl_scsiio(&io->scsiio);
5105 * This gets called by a backend driver when it is done with a
5106 * data_submit method.
5109 ctl_data_submit_done(union ctl_io *io)
5112 * If the IO_CONT flag is set, we need to call the supplied
5113 * function to continue processing the I/O, instead of completing
5116 * If there is an error, though, we don't want to keep processing.
5117 * Instead, just send status back to the initiator.
5119 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
5120 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
5121 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5122 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5123 io->scsiio.io_cont(io);
5130 * This gets called by a backend driver when it is done with a
5131 * configuration write.
5134 ctl_config_write_done(union ctl_io *io)
5139 * If the IO_CONT flag is set, we need to call the supplied
5140 * function to continue processing the I/O, instead of completing
5143 * If there is an error, though, we don't want to keep processing.
5144 * Instead, just send status back to the initiator.
5146 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
5147 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
5148 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
5149 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
5150 io->scsiio.io_cont(io);
5154 * Since a configuration write can be done for commands that actually
5155 * have data allocated, like write buffer, and commands that have
5156 * no data, like start/stop unit, we need to check here.
5158 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5159 buf = io->scsiio.kern_data_ptr;
5168 ctl_config_read_done(union ctl_io *io)
5173 * If there was an error, we are done; skip the data transfer.
5175 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
5176 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
5177 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
5178 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
5179 buf = io->scsiio.kern_data_ptr;
5189 * If the IO_CONT flag is set, we need to call the supplied
5190 * function to continue processing the I/O, instead of completing
5193 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
5194 io->scsiio.io_cont(io);
5202 * SCSI release command.
5205 ctl_scsi_release(struct ctl_scsiio *ctsio)
5207 int length, longid, thirdparty_id, resv_id;
5208 struct ctl_lun *lun;
5214 CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
5216 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
5217 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5219 switch (ctsio->cdb[0]) {
5221 struct scsi_release_10 *cdb;
5223 cdb = (struct scsi_release_10 *)ctsio->cdb;
5225 if (cdb->byte2 & SR10_LONGID)
5226 longid = 1;
5228 thirdparty_id = cdb->thirdparty_id;
5230 resv_id = cdb->resv_id;
5231 length = scsi_2btoul(cdb->length);
5238 * XXX KDM right now, we only support LUN reservation. We don't
5239 * support 3rd party reservations, or extent reservations, which
5240 * might actually need the parameter list. If we've gotten this
5241 * far, we've got a LUN reservation. Anything else got kicked out
5242 * above. So, according to SPC, ignore the length.
5246 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5247 && (length > 0)) {
5248 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5249 ctsio->kern_data_len = length;
5250 ctsio->kern_total_len = length;
5251 ctsio->kern_data_resid = 0;
5252 ctsio->kern_rel_offset = 0;
5253 ctsio->kern_sg_entries = 0;
5254 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5255 ctsio->be_move_done = ctl_config_move_done;
5256 ctl_datamove((union ctl_io *)ctsio);
5258 return (CTL_RETVAL_COMPLETE);
5262 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
5264 mtx_lock(&lun->lun_lock);
5267 * According to SPC, it is not an error for an initiator to attempt
5268 * to release a reservation on a LUN that isn't reserved, or that
5269 * is reserved by another initiator. The reservation can only be
5270 * released, though, by the initiator who made it or by one of
5271 * several reset type events.
5273 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
5274 lun->flags &= ~CTL_LUN_RESERVED;
5276 mtx_unlock(&lun->lun_lock);
5278 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5279 free(ctsio->kern_data_ptr, M_CTL);
5280 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5283 ctl_set_success(ctsio);
5284 ctl_done((union ctl_io *)ctsio);
5285 return (CTL_RETVAL_COMPLETE);
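/*
 * [Editor's sketch, not part of the original file] The SPC-2 rule
 * implemented above, reduced to its essentials: RELEASE clears a
 * reservation only when the releasing initiator holds it; releasing an
 * unreserved LUN, or one reserved by another initiator, succeeds with
 * no effect.  With hypothetical state fields:
 */
static void
example_release(int *reserved, uint32_t *holder_idx, uint32_t residx)
{
	if (*reserved && *holder_idx == residx)
		*reserved = 0;	/* drop our own reservation */
	/* Otherwise: no effect, and no error (SPC-2). */
}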
5289 ctl_scsi_reserve(struct ctl_scsiio *ctsio)
5291 int extent, thirdparty, longid;
5292 int resv_id, length;
5293 uint64_t thirdparty_id;
5294 struct ctl_lun *lun;
5304 CTL_DEBUG_PRINT(("ctl_reserve\n"));
5306 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
5307 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5309 switch (ctsio->cdb[0]) {
5311 struct scsi_reserve_10 *cdb;
5313 cdb = (struct scsi_reserve_10 *)ctsio->cdb;
5315 if (cdb->byte2 & SR10_LONGID)
5316 longid = 1;
5318 thirdparty_id = cdb->thirdparty_id;
5320 resv_id = cdb->resv_id;
5321 length = scsi_2btoul(cdb->length);
5327 * XXX KDM right now, we only support LUN reservation. We don't
5328 * support 3rd party reservations, or extent reservations, which
5329 * might actually need the parameter list. If we've gotten this
5330 * far, we've got a LUN reservation. Anything else got kicked out
5331 * above. So, according to SPC, ignore the length.
5335 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5336 && (length > 0)) {
5337 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5338 ctsio->kern_data_len = length;
5339 ctsio->kern_total_len = length;
5340 ctsio->kern_data_resid = 0;
5341 ctsio->kern_rel_offset = 0;
5342 ctsio->kern_sg_entries = 0;
5343 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5344 ctsio->be_move_done = ctl_config_move_done;
5345 ctl_datamove((union ctl_io *)ctsio);
5347 return (CTL_RETVAL_COMPLETE);
5351 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);
5353 mtx_lock(&lun->lun_lock);
5354 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
5355 ctl_set_reservation_conflict(ctsio);
5359 lun->flags |= CTL_LUN_RESERVED;
5360 lun->res_idx = residx;
5362 ctl_set_success(ctsio);
5365 mtx_unlock(&lun->lun_lock);
5367 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5368 free(ctsio->kern_data_ptr, M_CTL);
5369 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5372 ctl_done((union ctl_io *)ctsio);
5373 return (CTL_RETVAL_COMPLETE);
5377 ctl_start_stop(struct ctl_scsiio *ctsio)
5379 struct scsi_start_stop_unit *cdb;
5380 struct ctl_lun *lun;
5383 CTL_DEBUG_PRINT(("ctl_start_stop\n"));
5385 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5388 cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
5392 * We don't support the immediate bit on a stop unit. In order to
5393 * do that, we would need to code up a way to know that a stop is
5394 * pending, and hold off any new commands until it completes, one
5395 * way or another. Then we could accept or reject those commands
5396 * depending on its status. We would almost need to do the reverse
5397 * of what we do below for an immediate start -- return the copy of
5398 * the ctl_io to the FETD with status to send to the host (and to
5399 * free the copy!) and then free the original I/O once the stop
5400 * actually completes. That way, the OOA queue mechanism can work
5401 * to block commands that shouldn't proceed. Another alternative
5402 * would be to put the copy in the queue in place of the original,
5403 * and return the original back to the caller. That could be
5406 if ((cdb->byte2 & SSS_IMMED)
5407 && ((cdb->how & SSS_START) == 0)) {
5408 ctl_set_invalid_field(ctsio,
5414 ctl_done((union ctl_io *)ctsio);
5415 return (CTL_RETVAL_COMPLETE);
5418 if ((lun->flags & CTL_LUN_PR_RESERVED)
5419 && ((cdb->how & SSS_START)==0)) {
5422 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
5423 if (ctl_get_prkey(lun, residx) == 0
5424 || (lun->pr_res_idx != residx && lun->res_type < 4)) {
5426 ctl_set_reservation_conflict(ctsio);
5427 ctl_done((union ctl_io *)ctsio);
5428 return (CTL_RETVAL_COMPLETE);
5433 * If there is no backend on this device, we can't start or stop
5434 * it. In theory we shouldn't get any start/stop commands in the
5435 * first place at this level if the LUN doesn't have a backend.
5436 * That should get stopped by the command decode code.
5438 if (lun->backend == NULL) {
5439 ctl_set_invalid_opcode(ctsio);
5440 ctl_done((union ctl_io *)ctsio);
5441 return (CTL_RETVAL_COMPLETE);
5445 * XXX KDM Copan-specific offline behavior.
5446 * Figure out a reasonable way to port this?
5449 mtx_lock(&lun->lun_lock);
5451 if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
5452 && (lun->flags & CTL_LUN_OFFLINE)) {
5454 * If the LUN is offline, and the on/offline bit isn't set,
5455 * reject the start or stop. Otherwise, let it through.
5457 mtx_unlock(&lun->lun_lock);
5458 ctl_set_lun_not_ready(ctsio);
5459 ctl_done((union ctl_io *)ctsio);
5461 mtx_unlock(&lun->lun_lock);
5462 #endif /* NEEDTOPORT */
5464 * This could be a start or a stop when we're online,
5465 * or a stop/offline or start/online. A start or stop when
5466 * we're offline is covered in the case above.
5469 * In the non-immediate case, we send the request to
5470 * the backend and return status to the user when
5473 * In the immediate case, we allocate a new ctl_io
5474 * to hold a copy of the request, and send that to
5475 * the backend. We then set good status on the
5476 * user's request and return it immediately.
5478 if (cdb->byte2 & SSS_IMMED) {
5479 union ctl_io *new_io;
5481 new_io = ctl_alloc_io(ctsio->io_hdr.pool);
5482 ctl_copy_io((union ctl_io *)ctsio, new_io);
5483 retval = lun->backend->config_write(new_io);
5484 ctl_set_success(ctsio);
5485 ctl_done((union ctl_io *)ctsio);
5487 retval = lun->backend->config_write(
5488 (union ctl_io *)ctsio);
5497 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
5498 * we don't really do anything with the LBA and length fields if the user
5499 * passes them in. Instead we'll just flush out the cache for the entire
5503 ctl_sync_cache(struct ctl_scsiio *ctsio)
5505 struct ctl_lun *lun;
5506 struct ctl_softc *softc;
5507 struct ctl_lba_len_flags *lbalen;
5508 uint64_t starting_lba;
5509 uint32_t block_count;
5513 CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
5515 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5516 softc = lun->ctl_softc;
5519 switch (ctsio->cdb[0]) {
5520 case SYNCHRONIZE_CACHE: {
5521 struct scsi_sync_cache *cdb;
5522 cdb = (struct scsi_sync_cache *)ctsio->cdb;
5524 starting_lba = scsi_4btoul(cdb->begin_lba);
5525 block_count = scsi_2btoul(cdb->lb_count);
5529 case SYNCHRONIZE_CACHE_16: {
5530 struct scsi_sync_cache_16 *cdb;
5531 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;
5533 starting_lba = scsi_8btou64(cdb->begin_lba);
5534 block_count = scsi_4btoul(cdb->lb_count);
5539 ctl_set_invalid_opcode(ctsio);
5540 ctl_done((union ctl_io *)ctsio);
5542 break; /* NOTREACHED */
5546 * We check the LBA and length, but don't do anything with them.
5547 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
5548 * get flushed. This check will just help satisfy anyone who wants
5549 * to see an error for an out of range LBA.
5551 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
5552 ctl_set_lba_out_of_range(ctsio);
5553 ctl_done((union ctl_io *)ctsio);
5558 * If this LUN has no backend, we can't flush the cache anyway.
5560 if (lun->backend == NULL) {
5561 ctl_set_invalid_opcode(ctsio);
5562 ctl_done((union ctl_io *)ctsio);
5566 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5567 lbalen->lba = starting_lba;
5568 lbalen->len = block_count;
5569 lbalen->flags = byte2;
5572 * Check to see whether we're configured to send the SYNCHRONIZE
5573 * CACHE command directly to the back end.
5575 mtx_lock(&lun->lun_lock);
5576 if ((softc->flags & CTL_FLAG_REAL_SYNC)
5577 && (++(lun->sync_count) >= lun->sync_interval)) {
5578 lun->sync_count = 0;
5579 mtx_unlock(&lun->lun_lock);
5580 retval = lun->backend->config_write((union ctl_io *)ctsio);
5582 mtx_unlock(&lun->lun_lock);
5583 ctl_set_success(ctsio);
5584 ctl_done((union ctl_io *)ctsio);
5593 ctl_format(struct ctl_scsiio *ctsio)
5595 struct scsi_format *cdb;
5596 struct ctl_lun *lun;
5597 int length, defect_list_len;
5599 CTL_DEBUG_PRINT(("ctl_format\n"));
5601 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5603 cdb = (struct scsi_format *)ctsio->cdb;
5606 if (cdb->byte2 & SF_FMTDATA) {
5607 if (cdb->byte2 & SF_LONGLIST)
5608 length = sizeof(struct scsi_format_header_long);
5610 length = sizeof(struct scsi_format_header_short);
5613 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
5614 && (length > 0)) {
5615 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
5616 ctsio->kern_data_len = length;
5617 ctsio->kern_total_len = length;
5618 ctsio->kern_data_resid = 0;
5619 ctsio->kern_rel_offset = 0;
5620 ctsio->kern_sg_entries = 0;
5621 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5622 ctsio->be_move_done = ctl_config_move_done;
5623 ctl_datamove((union ctl_io *)ctsio);
5625 return (CTL_RETVAL_COMPLETE);
5628 defect_list_len = 0;
5630 if (cdb->byte2 & SF_FMTDATA) {
5631 if (cdb->byte2 & SF_LONGLIST) {
5632 struct scsi_format_header_long *header;
5634 header = (struct scsi_format_header_long *)
5635 ctsio->kern_data_ptr;
5637 defect_list_len = scsi_4btoul(header->defect_list_len);
5638 if (defect_list_len != 0) {
5639 ctl_set_invalid_field(ctsio,
5648 struct scsi_format_header_short *header;
5650 header = (struct scsi_format_header_short *)
5651 ctsio->kern_data_ptr;
5653 defect_list_len = scsi_2btoul(header->defect_list_len);
5654 if (defect_list_len != 0) {
5655 ctl_set_invalid_field(ctsio,
5667 * The format command will clear out the "Medium format corrupted"
5668 * status if set by the configuration code. That status is really
5669 * just a way to notify the host that we have lost the media, and
5670 * get them to issue a command that will basically make them think
5671 * they're blowing away the media.
5673 mtx_lock(&lun->lun_lock);
5674 lun->flags &= ~CTL_LUN_INOPERABLE;
5675 mtx_unlock(&lun->lun_lock);
5677 ctl_set_success(ctsio);
5680 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
5681 free(ctsio->kern_data_ptr, M_CTL);
5682 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
5685 ctl_done((union ctl_io *)ctsio);
5686 return (CTL_RETVAL_COMPLETE);
5690 ctl_read_buffer(struct ctl_scsiio *ctsio)
5692 struct scsi_read_buffer *cdb;
5693 struct ctl_lun *lun;
5694 int buffer_offset, len;
5695 static uint8_t descr[4];
5696 static uint8_t echo_descr[4] = { 0 };
5698 CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
5700 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5701 cdb = (struct scsi_read_buffer *)ctsio->cdb;
5703 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA &&
5704 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR &&
5705 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) {
5706 ctl_set_invalid_field(ctsio,
5712 ctl_done((union ctl_io *)ctsio);
5713 return (CTL_RETVAL_COMPLETE);
5716 len = scsi_3btoul(cdb->length);
5717 buffer_offset = scsi_3btoul(cdb->offset);
5719 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5720 ctl_set_invalid_field(ctsio,
5726 ctl_done((union ctl_io *)ctsio);
5727 return (CTL_RETVAL_COMPLETE);
5730 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) {
5732 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]);
5733 ctsio->kern_data_ptr = descr;
5734 len = min(len, sizeof(descr));
5735 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
5736 ctsio->kern_data_ptr = echo_descr;
5737 len = min(len, sizeof(echo_descr));
5739 if (lun->write_buffer == NULL) {
5740 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5741 M_CTL, M_WAITOK);
5742 }
5743 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5745 ctsio->kern_data_len = len;
5746 ctsio->kern_total_len = len;
5747 ctsio->kern_data_resid = 0;
5748 ctsio->kern_rel_offset = 0;
5749 ctsio->kern_sg_entries = 0;
5750 ctl_set_success(ctsio);
5751 ctsio->be_move_done = ctl_config_move_done;
5752 ctl_datamove((union ctl_io *)ctsio);
5753 return (CTL_RETVAL_COMPLETE);
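/*
 * [Editor's sketch, not part of the original file] The 3-byte length
 * and offset fields parsed above are big-endian byte arrays;
 * scsi_3btoul() and scsi_ulto3b() convert between them and host
 * integers.  Minimal equivalents of those CAM helpers:
 */
static uint32_t
example_3btoul(const uint8_t *b)
{
	return (((uint32_t)b[0] << 16) | ((uint32_t)b[1] << 8) | b[2]);
}

static void
example_ulto3b(uint32_t val, uint8_t *b)
{
	b[0] = (val >> 16) & 0xff;
	b[1] = (val >> 8) & 0xff;
	b[2] = val & 0xff;
}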
5757 ctl_write_buffer(struct ctl_scsiio *ctsio)
5759 struct scsi_write_buffer *cdb;
5760 struct ctl_lun *lun;
5761 int buffer_offset, len;
5763 CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
5765 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5766 cdb = (struct scsi_write_buffer *)ctsio->cdb;
5768 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
5769 ctl_set_invalid_field(ctsio,
5775 ctl_done((union ctl_io *)ctsio);
5776 return (CTL_RETVAL_COMPLETE);
5779 len = scsi_3btoul(cdb->length);
5780 buffer_offset = scsi_3btoul(cdb->offset);
5782 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5783 ctl_set_invalid_field(ctsio,
5789 ctl_done((union ctl_io *)ctsio);
5790 return (CTL_RETVAL_COMPLETE);
5794 * If we've got a kernel request that hasn't been malloced yet,
5795 * malloc it and tell the caller the data buffer is here.
5797 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5798 if (lun->write_buffer == NULL) {
5799 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
5800 M_CTL, M_WAITOK);
5801 }
5802 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
5803 ctsio->kern_data_len = len;
5804 ctsio->kern_total_len = len;
5805 ctsio->kern_data_resid = 0;
5806 ctsio->kern_rel_offset = 0;
5807 ctsio->kern_sg_entries = 0;
5808 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5809 ctsio->be_move_done = ctl_config_move_done;
5810 ctl_datamove((union ctl_io *)ctsio);
5812 return (CTL_RETVAL_COMPLETE);
5815 ctl_set_success(ctsio);
5816 ctl_done((union ctl_io *)ctsio);
5817 return (CTL_RETVAL_COMPLETE);
5821 ctl_write_same(struct ctl_scsiio *ctsio)
5823 struct ctl_lun *lun;
5824 struct ctl_lba_len_flags *lbalen;
5826 uint32_t num_blocks;
5830 retval = CTL_RETVAL_COMPLETE;
5832 CTL_DEBUG_PRINT(("ctl_write_same\n"));
5834 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5836 switch (ctsio->cdb[0]) {
5837 case WRITE_SAME_10: {
5838 struct scsi_write_same_10 *cdb;
5840 cdb = (struct scsi_write_same_10 *)ctsio->cdb;
5842 lba = scsi_4btoul(cdb->addr);
5843 num_blocks = scsi_2btoul(cdb->length);
5847 case WRITE_SAME_16: {
5848 struct scsi_write_same_16 *cdb;
5850 cdb = (struct scsi_write_same_16 *)ctsio->cdb;
5852 lba = scsi_8btou64(cdb->addr);
5853 num_blocks = scsi_4btoul(cdb->length);
5859 * We got a command we don't support. This shouldn't
5860 * happen, commands should be filtered out above us.
5862 ctl_set_invalid_opcode(ctsio);
5863 ctl_done((union ctl_io *)ctsio);
5865 return (CTL_RETVAL_COMPLETE);
5866 break; /* NOTREACHED */
5869 /* NDOB and ANCHOR flags can be used only together with UNMAP */
5870 if ((byte2 & SWS_UNMAP) == 0 &&
5871 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) {
5872 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
5873 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
5874 ctl_done((union ctl_io *)ctsio);
5875 return (CTL_RETVAL_COMPLETE);
5879 * The first check is to make sure we're in bounds, the second
5880 * check is to catch wrap-around problems. If the lba + num blocks
5881 * is less than the lba, then we've wrapped around and the block
5882 * range is invalid anyway.
5884 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
5885 || ((lba + num_blocks) < lba)) {
5886 ctl_set_lba_out_of_range(ctsio);
5887 ctl_done((union ctl_io *)ctsio);
5888 return (CTL_RETVAL_COMPLETE);
5891 /* Zero number of blocks means "to the last logical block" */
5892 if (num_blocks == 0) {
5893 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) {
5894 ctl_set_invalid_field(ctsio,
5900 ctl_done((union ctl_io *)ctsio);
5901 return (CTL_RETVAL_COMPLETE);
5903 num_blocks = (lun->be_lun->maxlba + 1) - lba;
5906 len = lun->be_lun->blocksize;
5909 * If we've got a kernel request that hasn't been malloced yet,
5910 * malloc it and tell the caller the data buffer is here.
5912 if ((byte2 & SWS_NDOB) == 0 &&
5913 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5914 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5915 ctsio->kern_data_len = len;
5916 ctsio->kern_total_len = len;
5917 ctsio->kern_data_resid = 0;
5918 ctsio->kern_rel_offset = 0;
5919 ctsio->kern_sg_entries = 0;
5920 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5921 ctsio->be_move_done = ctl_config_move_done;
5922 ctl_datamove((union ctl_io *)ctsio);
5924 return (CTL_RETVAL_COMPLETE);
5927 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5929 lbalen->len = num_blocks;
5930 lbalen->flags = byte2;
5931 retval = lun->backend->config_write((union ctl_io *)ctsio);
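/*
 * [Editor's sketch, not part of the original file] The range check
 * used by WRITE SAME above (and by UNMAP below): with 64-bit LBAs,
 * "lba + num_blocks < lba" catches unsigned wrap-around, so both an
 * end past the last block and an overflowing sum are rejected.
 * Assumes maxlba < UINT64_MAX, as the "maxlba + 1" above does.
 */
static int
example_lba_range_ok(uint64_t lba, uint64_t num_blocks, uint64_t maxlba)
{
	if (lba + num_blocks < lba)		/* wrapped past 2^64 */
		return (0);
	if (lba + num_blocks > maxlba + 1)	/* runs past last block */
		return (0);
	return (1);
}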
5937 ctl_unmap(struct ctl_scsiio *ctsio)
5939 struct ctl_lun *lun;
5940 struct scsi_unmap *cdb;
5941 struct ctl_ptr_len_flags *ptrlen;
5942 struct scsi_unmap_header *hdr;
5943 struct scsi_unmap_desc *buf, *end, *endnz, *range;
5945 uint32_t num_blocks;
5949 retval = CTL_RETVAL_COMPLETE;
5951 CTL_DEBUG_PRINT(("ctl_unmap\n"));
5953 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
5954 cdb = (struct scsi_unmap *)ctsio->cdb;
5956 len = scsi_2btoul(cdb->length);
5960 * If we've got a kernel request that hasn't been malloced yet,
5961 * malloc it and tell the caller the data buffer is here.
5963 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5964 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5965 ctsio->kern_data_len = len;
5966 ctsio->kern_total_len = len;
5967 ctsio->kern_data_resid = 0;
5968 ctsio->kern_rel_offset = 0;
5969 ctsio->kern_sg_entries = 0;
5970 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5971 ctsio->be_move_done = ctl_config_move_done;
5972 ctl_datamove((union ctl_io *)ctsio);
5974 return (CTL_RETVAL_COMPLETE);
5977 len = ctsio->kern_total_len - ctsio->kern_data_resid;
5978 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
5979 if (len < sizeof (*hdr) ||
5980 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
5981 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
5982 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
5983 ctl_set_invalid_field(ctsio,
5991 len = scsi_2btoul(hdr->desc_length);
5992 buf = (struct scsi_unmap_desc *)(hdr + 1);
5993 end = buf + len / sizeof(*buf);
5995 endnz = buf;
5996 for (range = buf; range < end; range++) {
5997 lba = scsi_8btou64(range->lba);
5998 num_blocks = scsi_4btoul(range->length);
5999 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
6000 || ((lba + num_blocks) < lba)) {
6001 ctl_set_lba_out_of_range(ctsio);
6002 ctl_done((union ctl_io *)ctsio);
6003 return (CTL_RETVAL_COMPLETE);
6005 if (num_blocks != 0)
6006 endnz = range + 1;
6007 }
6010 * The block backend cannot handle a zero-length last range.
6011 * Filter it out and return if there is nothing left.
6013 len = (uint8_t *)endnz - (uint8_t *)buf;
6014 if (len == 0) {
6015 ctl_set_success(ctsio);
6019 mtx_lock(&lun->lun_lock);
6020 ptrlen = (struct ctl_ptr_len_flags *)
6021 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
6022 ptrlen->ptr = (void *)buf;
6024 ptrlen->flags = byte2;
6025 ctl_check_blocked(lun);
6026 mtx_unlock(&lun->lun_lock);
6028 retval = lun->backend->config_write((union ctl_io *)ctsio);
6032 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
6033 free(ctsio->kern_data_ptr, M_CTL);
6034 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
6036 ctl_done((union ctl_io *)ctsio);
6037 return (CTL_RETVAL_COMPLETE);
6041 * Note that this function currently doesn't actually do anything inside
6042 * CTL to enforce things if the DQue bit is turned on.
6044 * Also note that this function can't be used in the default case, because
6045 * the DQue bit isn't set in the changeable mask for the control mode page
6046 * anyway. This is just here as an example for how to implement a page
6047 * handler, and a placeholder in case we want to allow the user to turn
6048 * tagged queueing on and off.
6050 * The D_SENSE bit handling is functional, however, and will turn
6051 * descriptor sense on and off for a given LUN.
6054 ctl_control_page_handler(struct ctl_scsiio *ctsio,
6055 struct ctl_page_index *page_index, uint8_t *page_ptr)
6057 struct scsi_control_page *current_cp, *saved_cp, *user_cp;
6058 struct ctl_lun *lun;
6062 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6063 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
6066 user_cp = (struct scsi_control_page *)page_ptr;
6067 current_cp = (struct scsi_control_page *)
6068 (page_index->page_data + (page_index->page_len *
6070 saved_cp = (struct scsi_control_page *)
6071 (page_index->page_data + (page_index->page_len *
6074 mtx_lock(&lun->lun_lock);
6075 if (((current_cp->rlec & SCP_DSENSE) == 0)
6076 && ((user_cp->rlec & SCP_DSENSE) != 0)) {
6078 * Descriptor sense is currently turned off and the user
6079 * wants to turn it on.
6081 current_cp->rlec |= SCP_DSENSE;
6082 saved_cp->rlec |= SCP_DSENSE;
6083 lun->flags |= CTL_LUN_SENSE_DESC;
6085 } else if (((current_cp->rlec & SCP_DSENSE) != 0)
6086 && ((user_cp->rlec & SCP_DSENSE) == 0)) {
6088 * Descriptor sense is currently turned on, and the user
6089 * wants to turn it off.
6091 current_cp->rlec &= ~SCP_DSENSE;
6092 saved_cp->rlec &= ~SCP_DSENSE;
6093 lun->flags &= ~CTL_LUN_SENSE_DESC;
6096 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) !=
6097 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) {
6098 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
6099 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
6100 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
6101 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
6104 if ((current_cp->eca_and_aen & SCP_SWP) !=
6105 (user_cp->eca_and_aen & SCP_SWP)) {
6106 current_cp->eca_and_aen &= ~SCP_SWP;
6107 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
6108 saved_cp->eca_and_aen &= ~SCP_SWP;
6109 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
6113 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
6114 mtx_unlock(&lun->lun_lock);
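/*
 * [Editor's sketch, not part of the original file] Each mode page is
 * kept as consecutive copies in one flat buffer, addressed as
 * page_data + page_len * pc, which is how current_cp and saved_cp are
 * derived above.  Illustration with a hypothetical accessor; the index
 * values mirror the SMS page-control encoding and are assumptions:
 */
static uint8_t *
example_page_copy(uint8_t *page_data, int page_len, int which)
{
	/* which: 0 = current, 1 = changeable, 2 = default, 3 = saved */
	return (page_data + (size_t)page_len * which);
}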
6120 ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
6121 struct ctl_page_index *page_index, uint8_t *page_ptr)
6123 struct scsi_caching_page *current_cp, *saved_cp, *user_cp;
6124 struct ctl_lun *lun;
6128 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6129 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
6132 user_cp = (struct scsi_caching_page *)page_ptr;
6133 current_cp = (struct scsi_caching_page *)
6134 (page_index->page_data + (page_index->page_len *
6136 saved_cp = (struct scsi_caching_page *)
6137 (page_index->page_data + (page_index->page_len *
6140 mtx_lock(&lun->lun_lock);
6141 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) !=
6142 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) {
6143 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
6144 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
6145 saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
6146 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
6150 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
6151 mtx_unlock(&lun->lun_lock);
6157 ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
6158 struct ctl_page_index *page_index,
6164 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
6169 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
6170 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
6171 printf("page data:");
6173 printf(" %.2x",page_ptr[i]);
6179 ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
6180 struct ctl_page_index *page_index,
6183 struct copan_debugconf_subpage *page;
6185 page = (struct copan_debugconf_subpage *)page_index->page_data +
6186 (page_index->page_len * pc);
6189 case SMS_PAGE_CTRL_CHANGEABLE >> 6:
6190 case SMS_PAGE_CTRL_DEFAULT >> 6:
6191 case SMS_PAGE_CTRL_SAVED >> 6:
6193 * We don't update the changeable or default bits for this page.
6196 case SMS_PAGE_CTRL_CURRENT >> 6:
6197 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
6198 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
6202 EPRINT(0, "Invalid PC %d!!", pc);
6203 #endif /* NEEDTOPORT */
6211 ctl_do_mode_select(union ctl_io *io)
6213 struct scsi_mode_page_header *page_header;
6214 struct ctl_page_index *page_index;
6215 struct ctl_scsiio *ctsio;
6216 int control_dev, page_len;
6217 int page_len_offset, page_len_size;
6218 union ctl_modepage_info *modepage_info;
6219 struct ctl_lun *lun;
6220 int *len_left, *len_used;
6223 ctsio = &io->scsiio;
6226 retval = CTL_RETVAL_COMPLETE;
6228 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6230 if (lun->be_lun->lun_type != T_DIRECT)
6231 control_dev = 1;
6232 else
6233 control_dev = 0;
6235 modepage_info = (union ctl_modepage_info *)
6236 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6237 len_left = &modepage_info->header.len_left;
6238 len_used = &modepage_info->header.len_used;
6242 page_header = (struct scsi_mode_page_header *)
6243 (ctsio->kern_data_ptr + *len_used);
6245 if (*len_left == 0) {
6246 free(ctsio->kern_data_ptr, M_CTL);
6247 ctl_set_success(ctsio);
6248 ctl_done((union ctl_io *)ctsio);
6249 return (CTL_RETVAL_COMPLETE);
6250 } else if (*len_left < sizeof(struct scsi_mode_page_header)) {
6252 free(ctsio->kern_data_ptr, M_CTL);
6253 ctl_set_param_len_error(ctsio);
6254 ctl_done((union ctl_io *)ctsio);
6255 return (CTL_RETVAL_COMPLETE);
6257 } else if ((page_header->page_code & SMPH_SPF)
6258 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
6260 free(ctsio->kern_data_ptr, M_CTL);
6261 ctl_set_param_len_error(ctsio);
6262 ctl_done((union ctl_io *)ctsio);
6263 return (CTL_RETVAL_COMPLETE);
6268 * XXX KDM should we do something with the block descriptor?
6270 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6272 if ((control_dev != 0)
6273 && (lun->mode_pages.index[i].page_flags &
6274 CTL_PAGE_FLAG_DISK_ONLY))
6275 continue;
6277 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
6278 (page_header->page_code & SMPH_PC_MASK))
6282 * If neither page has a subpage code, then we've got a
6285 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0)
6286 && ((page_header->page_code & SMPH_SPF) == 0)) {
6287 page_index = &lun->mode_pages.index[i];
6288 page_len = page_header->page_length;
6293 * If both pages have subpages, then the subpage numbers
6296 if ((lun->mode_pages.index[i].page_code & SMPH_SPF)
6297 && (page_header->page_code & SMPH_SPF)) {
6298 struct scsi_mode_page_header_sp *sph;
6300 sph = (struct scsi_mode_page_header_sp *)page_header;
6302 if (lun->mode_pages.index[i].subpage ==
6304 page_index = &lun->mode_pages.index[i];
6305 page_len = scsi_2btoul(sph->page_length);
6312 * If we couldn't find the page, or if we don't have a mode select
6313 * handler for it, send back an error to the user.
6315 if ((page_index == NULL)
6316 || (page_index->select_handler == NULL)) {
6317 ctl_set_invalid_field(ctsio,
6320 /*field*/ *len_used,
6323 free(ctsio->kern_data_ptr, M_CTL);
6324 ctl_done((union ctl_io *)ctsio);
6325 return (CTL_RETVAL_COMPLETE);
6328 if (page_index->page_code & SMPH_SPF) {
6329 page_len_offset = 2;
6330 page_len_size = 2;
6331 } else {
6333 page_len_offset = 1;
6334 page_len_size = 1;
6335 }
6337 * If the length the initiator gives us isn't the one we specify in
6338 * the mode page header, or if they didn't specify enough data in
6339 * the CDB to avoid truncating this page, kick out the request.
6341 if ((page_len != (page_index->page_len - page_len_offset -
6342 page_len_size))
6343 || (*len_left < page_index->page_len)) {
6346 ctl_set_invalid_field(ctsio,
6349 /*field*/ *len_used + page_len_offset,
6352 free(ctsio->kern_data_ptr, M_CTL);
6353 ctl_done((union ctl_io *)ctsio);
6354 return (CTL_RETVAL_COMPLETE);
6358 * Run through the mode page, checking to make sure that the bits
6359 * the user changed are actually legal for him to change.
6361 for (i = 0; i < page_index->page_len; i++) {
6362 uint8_t *user_byte, *change_mask, *current_byte;
6366 user_byte = (uint8_t *)page_header + i;
6367 change_mask = page_index->page_data +
6368 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
6369 current_byte = page_index->page_data +
6370 (page_index->page_len * CTL_PAGE_CURRENT) + i;
6373 * Check to see whether the user set any bits in this byte
6374 * that he is not allowed to set.
6376 if ((*user_byte & ~(*change_mask)) ==
6377 (*current_byte & ~(*change_mask)))
6381 * Go through bit by bit to determine which one is illegal.
6384 for (j = 7; j >= 0; j--) {
6385 if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
6386 (((1 << j) & ~(*change_mask)) & *current_byte)) {
6391 ctl_set_invalid_field(ctsio,
6394 /*field*/ *len_used + i,
6397 free(ctsio->kern_data_ptr, M_CTL);
6398 ctl_done((union ctl_io *)ctsio);
6399 return (CTL_RETVAL_COMPLETE);
6403 * Decrement these before we call the page handler, since we may
6404 * end up getting called back one way or another before the handler
6405 * returns to this context.
6407 *len_left -= page_index->page_len;
6408 *len_used += page_index->page_len;
6410 retval = page_index->select_handler(ctsio, page_index,
6411 (uint8_t *)page_header);
6414 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6415 * wait until this queued command completes to finish processing
6416 * the mode page. If it returns anything other than
6417 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6418 * already set the sense information, freed the data pointer, and
6419 * completed the io for us.
6421 if (retval != CTL_RETVAL_COMPLETE)
6422 goto bailout_no_done;
6425 * If the initiator sent us more than one page, parse the next one.
6430 ctl_set_success(ctsio);
6431 free(ctsio->kern_data_ptr, M_CTL);
6432 ctl_done((union ctl_io *)ctsio);
6436 return (CTL_RETVAL_COMPLETE);
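/*
 * [Editor's sketch, not part of the original file] The legality test
 * in the loop above: bits outside the changeable mask must match the
 * current value, so masking both sides with ~change_mask makes every
 * writable bit compare equal.  E.g. with current = 0x10 and
 * change_mask = 0x04, user = 0x14 passes but user = 0x90 fails
 * (bit 7 is not changeable).
 */
static int
example_byte_legal(uint8_t user, uint8_t cur, uint8_t change_mask)
{
	return ((user & ~change_mask) == (cur & ~change_mask));
}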
6441 ctl_mode_select(struct ctl_scsiio *ctsio)
6443 int param_len, pf, sp;
6444 int header_size, bd_len;
6445 int len_left, len_used;
6446 struct ctl_page_index *page_index;
6447 struct ctl_lun *lun;
6448 int control_dev, page_len;
6449 union ctl_modepage_info *modepage_info;
6461 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6463 if (lun->be_lun->lun_type != T_DIRECT)
6464 control_dev = 1;
6465 else
6466 control_dev = 0;
6468 switch (ctsio->cdb[0]) {
6469 case MODE_SELECT_6: {
6470 struct scsi_mode_select_6 *cdb;
6472 cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6474 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6475 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6477 param_len = cdb->length;
6478 header_size = sizeof(struct scsi_mode_header_6);
6481 case MODE_SELECT_10: {
6482 struct scsi_mode_select_10 *cdb;
6484 cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
6486 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6487 sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6489 param_len = scsi_2btoul(cdb->length);
6490 header_size = sizeof(struct scsi_mode_header_10);
6494 ctl_set_invalid_opcode(ctsio);
6495 ctl_done((union ctl_io *)ctsio);
6496 return (CTL_RETVAL_COMPLETE);
6497 break; /* NOTREACHED */
6502 * "A parameter list length of zero indicates that the Data-Out Buffer
6503 * shall be empty. This condition shall not be considered as an error."
6505 if (param_len == 0) {
6506 ctl_set_success(ctsio);
6507 ctl_done((union ctl_io *)ctsio);
6508 return (CTL_RETVAL_COMPLETE);
6512 * Since we'll hit this the first time through, prior to
6513 * allocation, we don't need to free a data buffer here.
6515 if (param_len < header_size) {
6516 ctl_set_param_len_error(ctsio);
6517 ctl_done((union ctl_io *)ctsio);
6518 return (CTL_RETVAL_COMPLETE);
6522 * Allocate the data buffer and grab the user's data. In theory,
6523 * we shouldn't have to sanity check the parameter list length here
6524 * because the maximum size is 64K. We should be able to malloc
6525 * that much without too many problems.
6527 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6528 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
6529 ctsio->kern_data_len = param_len;
6530 ctsio->kern_total_len = param_len;
6531 ctsio->kern_data_resid = 0;
6532 ctsio->kern_rel_offset = 0;
6533 ctsio->kern_sg_entries = 0;
6534 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6535 ctsio->be_move_done = ctl_config_move_done;
6536 ctl_datamove((union ctl_io *)ctsio);
6538 return (CTL_RETVAL_COMPLETE);
6541 switch (ctsio->cdb[0]) {
6542 case MODE_SELECT_6: {
6543 struct scsi_mode_header_6 *mh6;
6545 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
6546 bd_len = mh6->blk_desc_len;
6549 case MODE_SELECT_10: {
6550 struct scsi_mode_header_10 *mh10;
6552 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
6553 bd_len = scsi_2btoul(mh10->blk_desc_len);
6557 panic("Invalid CDB type %#x", ctsio->cdb[0]);
6561 if (param_len < (header_size + bd_len)) {
6562 free(ctsio->kern_data_ptr, M_CTL);
6563 ctl_set_param_len_error(ctsio);
6564 ctl_done((union ctl_io *)ctsio);
6565 return (CTL_RETVAL_COMPLETE);
6569 * Set the IO_CONT flag, so that if this I/O gets passed to
6570 * ctl_config_write_done(), it'll get passed back to
6571 * ctl_do_mode_select() for further processing, or completion if
6574 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
6575 ctsio->io_cont = ctl_do_mode_select;
6577 modepage_info = (union ctl_modepage_info *)
6578 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6580 memset(modepage_info, 0, sizeof(*modepage_info));
6582 len_left = param_len - header_size - bd_len;
6583 len_used = header_size + bd_len;
6585 modepage_info->header.len_left = len_left;
6586 modepage_info->header.len_used = len_used;
6588 return (ctl_do_mode_select((union ctl_io *)ctsio));
6592 ctl_mode_sense(struct ctl_scsiio *ctsio)
6594 struct ctl_lun *lun;
6595 int pc, page_code, dbd, llba, subpage;
6596 int alloc_len, page_len, header_len, total_len;
6597 struct scsi_mode_block_descr *block_desc;
6598 struct ctl_page_index *page_index;
6606 CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
6608 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6610 if (lun->be_lun->lun_type != T_DIRECT)
6611 control_dev = 1;
6612 else
6613 control_dev = 0;
6615 switch (ctsio->cdb[0]) {
6616 case MODE_SENSE_6: {
6617 struct scsi_mode_sense_6 *cdb;
6619 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
6621 header_len = sizeof(struct scsi_mode_hdr_6);
6622 if (cdb->byte2 & SMS_DBD)
6623 dbd = 1;
6624 else
6625 header_len += sizeof(struct scsi_mode_block_descr);
6627 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6628 page_code = cdb->page & SMS_PAGE_CODE;
6629 subpage = cdb->subpage;
6630 alloc_len = cdb->length;
6633 case MODE_SENSE_10: {
6634 struct scsi_mode_sense_10 *cdb;
6636 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;
6638 header_len = sizeof(struct scsi_mode_hdr_10);
6640 if (cdb->byte2 & SMS_DBD)
6641 dbd = 1;
6642 else
6643 header_len += sizeof(struct scsi_mode_block_descr);
6644 if (cdb->byte2 & SMS10_LLBAA)
6645 llba = 1;
6646 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
6647 page_code = cdb->page & SMS_PAGE_CODE;
6648 subpage = cdb->subpage;
6649 alloc_len = scsi_2btoul(cdb->length);
6653 ctl_set_invalid_opcode(ctsio);
6654 ctl_done((union ctl_io *)ctsio);
6655 return (CTL_RETVAL_COMPLETE);
6656 break; /* NOTREACHED */
6660 * We have to make a first pass through to calculate the size of
6661 * the pages that match the user's query. Then we allocate enough
6662 * memory to hold it, and actually copy the data into the buffer.
6664 switch (page_code) {
6665 case SMS_ALL_PAGES_PAGE: {
6671 * At the moment, values other than 0 and 0xff here are
6672 * reserved according to SPC-3.
6674 if ((subpage != SMS_SUBPAGE_PAGE_0)
6675 && (subpage != SMS_SUBPAGE_ALL)) {
6676 ctl_set_invalid_field(ctsio,
6682 ctl_done((union ctl_io *)ctsio);
6683 return (CTL_RETVAL_COMPLETE);
6686 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6687 if ((control_dev != 0)
6688 && (lun->mode_pages.index[i].page_flags &
6689 CTL_PAGE_FLAG_DISK_ONLY))
6690 continue;
6693 * We don't use this subpage if the user didn't
6694 * request all subpages.
6696 if ((lun->mode_pages.index[i].subpage != 0)
6697 && (subpage == SMS_SUBPAGE_PAGE_0))
6698 continue;
6701 printf("found page %#x len %d\n",
6702 lun->mode_pages.index[i].page_code &
6704 lun->mode_pages.index[i].page_len);
6706 page_len += lun->mode_pages.index[i].page_len;
6715 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6716 /* Look for the right page code */
6717 if ((lun->mode_pages.index[i].page_code &
6718 SMPH_PC_MASK) != page_code)
6719 continue;
6721 /* Look for the right subpage or the subpage wildcard */
6722 if ((lun->mode_pages.index[i].subpage != subpage)
6723 && (subpage != SMS_SUBPAGE_ALL))
6724 continue;
6726 /* Make sure the page is supported for this dev type */
6727 if ((control_dev != 0)
6728 && (lun->mode_pages.index[i].page_flags &
6729 CTL_PAGE_FLAG_DISK_ONLY))
6730 continue;
6733 printf("found page %#x len %d\n",
6734 lun->mode_pages.index[i].page_code &
6736 lun->mode_pages.index[i].page_len);
6739 page_len += lun->mode_pages.index[i].page_len;
6742 if (page_len == 0) {
6743 ctl_set_invalid_field(ctsio,
6749 ctl_done((union ctl_io *)ctsio);
6750 return (CTL_RETVAL_COMPLETE);
6756 total_len = header_len + page_len;
6758 printf("header_len = %d, page_len = %d, total_len = %d\n",
6759 header_len, page_len, total_len);
6762 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
6763 ctsio->kern_sg_entries = 0;
6764 ctsio->kern_data_resid = 0;
6765 ctsio->kern_rel_offset = 0;
6766 if (total_len < alloc_len) {
6767 ctsio->residual = alloc_len - total_len;
6768 ctsio->kern_data_len = total_len;
6769 ctsio->kern_total_len = total_len;
6771 ctsio->residual = 0;
6772 ctsio->kern_data_len = alloc_len;
6773 ctsio->kern_total_len = alloc_len;
6776 switch (ctsio->cdb[0]) {
6777 case MODE_SENSE_6: {
6778 struct scsi_mode_hdr_6 *header;
6780 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr;
6782 header->datalen = MIN(total_len - 1, 254);
6783 if (control_dev == 0) {
6784 header->dev_specific = 0x10; /* DPOFUA */
6785 if ((lun->flags & CTL_LUN_READONLY) ||
6786 (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
6787 .eca_and_aen & SCP_SWP) != 0)
6788 header->dev_specific |= 0x80; /* WP */
6791 header->block_descr_len = 0;
6793 header->block_descr_len =
6794 sizeof(struct scsi_mode_block_descr);
6795 block_desc = (struct scsi_mode_block_descr *)&header[1];
6798 case MODE_SENSE_10: {
6799 struct scsi_mode_hdr_10 *header;
6802 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr;
6804 datalen = MIN(total_len - 2, 65533);
6805 scsi_ulto2b(datalen, header->datalen);
6806 if (control_dev == 0) {
6807 header->dev_specific = 0x10; /* DPOFUA */
6808 if ((lun->flags & CTL_LUN_READONLY) ||
6809 (lun->mode_pages.control_page[CTL_PAGE_CURRENT]
6810 .eca_and_aen & SCP_SWP) != 0)
6811 header->dev_specific |= 0x80; /* WP */
6814 scsi_ulto2b(0, header->block_descr_len);
6816 scsi_ulto2b(sizeof(struct scsi_mode_block_descr),
6817 header->block_descr_len);
6818 block_desc = (struct scsi_mode_block_descr *)&header[1];
6822 panic("invalid CDB type %#x", ctsio->cdb[0]);
6823 break; /* NOTREACHED */
6827 * If we've got a disk, use its blocksize in the block
6828 * descriptor. Otherwise, just set it to 0.
6831 if (control_dev == 0)
6832 scsi_ulto3b(lun->be_lun->blocksize,
6833 block_desc->block_len);
6834 else
6835 scsi_ulto3b(0, block_desc->block_len);
6838 switch (page_code) {
6839 case SMS_ALL_PAGES_PAGE: {
6842 data_used = header_len;
6843 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6844 struct ctl_page_index *page_index;
6846 page_index = &lun->mode_pages.index[i];
6848 if ((control_dev != 0)
6849 && (page_index->page_flags &
6850 CTL_PAGE_FLAG_DISK_ONLY))
6851 continue;
6854 * We don't use this subpage if the user didn't
6855 * request all subpages. We already checked (above)
6856 * to make sure the user only specified a subpage
6857 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
6859 if ((page_index->subpage != 0)
6860 && (subpage == SMS_SUBPAGE_PAGE_0))
6861 continue;
6864 * Call the handler, if it exists, to update the
6865 * page to the latest values.
6867 if (page_index->sense_handler != NULL)
6868 page_index->sense_handler(ctsio, page_index, pc);
6870 memcpy(ctsio->kern_data_ptr + data_used,
6871 page_index->page_data +
6872 (page_index->page_len * pc),
6873 page_index->page_len);
6874 data_used += page_index->page_len;
6881 data_used = header_len;
6883 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
6884 struct ctl_page_index *page_index;
6886 page_index = &lun->mode_pages.index[i];
6888 /* Look for the right page code */
6889 if ((page_index->page_code & SMPH_PC_MASK) != page_code)
6890 continue;
6892 /* Look for the right subpage or the subpage wildcard */
6893 if ((page_index->subpage != subpage)
6894 && (subpage != SMS_SUBPAGE_ALL))
6895 continue;
6897 /* Make sure the page is supported for this dev type */
6898 if ((control_dev != 0)
6899 && (page_index->page_flags &
6900 CTL_PAGE_FLAG_DISK_ONLY))
6901 continue;
6904 * Call the handler, if it exists, to update the
6905 * page to the latest values.
6907 if (page_index->sense_handler != NULL)
6908 page_index->sense_handler(ctsio, page_index, pc);
6910 memcpy(ctsio->kern_data_ptr + data_used,
6911 page_index->page_data +
6912 (page_index->page_len * pc),
6913 page_index->page_len);
6914 data_used += page_index->page_len;
6920 ctl_set_success(ctsio);
6921 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6922 ctsio->be_move_done = ctl_config_move_done;
6923 ctl_datamove((union ctl_io *)ctsio);
6924 return (CTL_RETVAL_COMPLETE);
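/*
 * [Editor's sketch, not part of the original file] The allocation
 * length handling above follows the usual SPC rule: never return more
 * than the initiator asked for, and report the shortfall as residual
 * when the data is smaller than alloc_len.
 */
static void
example_alloc_len(int total_len, int alloc_len, int *xfer_len, int *residual)
{
	if (total_len < alloc_len) {
		*xfer_len = total_len;			/* send what exists */
		*residual = alloc_len - total_len;
	} else {
		*xfer_len = alloc_len;			/* truncate to request */
		*residual = 0;
	}
}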
6928 ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
6929 struct ctl_page_index *page_index,
6932 struct ctl_lun *lun;
6933 struct scsi_log_param_header *phdr;
6937 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6938 data = page_index->page_data;
6940 if (lun->backend->lun_attr != NULL &&
6941 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail"))
6943 phdr = (struct scsi_log_param_header *)data;
6944 scsi_ulto2b(0x0001, phdr->param_code);
6945 phdr->param_control = SLP_LBIN | SLP_LP;
6946 phdr->param_len = 8;
6947 data = (uint8_t *)(phdr + 1);
6948 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6949 data[4] = 0x02; /* per-pool */
6950 data += phdr->param_len;
6953 if (lun->backend->lun_attr != NULL &&
6954 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused"))
6956 phdr = (struct scsi_log_param_header *)data;
6957 scsi_ulto2b(0x0002, phdr->param_code);
6958 phdr->param_control = SLP_LBIN | SLP_LP;
6959 phdr->param_len = 8;
6960 data = (uint8_t *)(phdr + 1);
6961 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6962 data[4] = 0x01; /* per-LUN */
6963 data += phdr->param_len;
6966 if (lun->backend->lun_attr != NULL &&
6967 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail"))
6969 phdr = (struct scsi_log_param_header *)data;
6970 scsi_ulto2b(0x00f1, phdr->param_code);
6971 phdr->param_control = SLP_LBIN | SLP_LP;
6972 phdr->param_len = 8;
6973 data = (uint8_t *)(phdr + 1);
6974 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6975 data[4] = 0x02; /* per-pool */
6976 data += phdr->param_len;
6979 if (lun->backend->lun_attr != NULL &&
6980 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused"))
6982 phdr = (struct scsi_log_param_header *)data;
6983 scsi_ulto2b(0x00f2, phdr->param_code);
6984 phdr->param_control = SLP_LBIN | SLP_LP;
6985 phdr->param_len = 8;
6986 data = (uint8_t *)(phdr + 1);
6987 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
6988 data[4] = 0x02; /* per-pool */
6989 data += phdr->param_len;
6992 page_index->page_len = data - page_index->page_data;
6997 ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
6998 struct ctl_page_index *page_index,
7001 struct ctl_lun *lun;
7002 struct stat_page *data;
7003 uint64_t rn, wn, rb, wb;
7004 struct bintime rt, wt;
7007 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7008 data = (struct stat_page *)page_index->page_data;
7010 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
7011 data->sap.hdr.param_control = SLP_LBIN;
7012 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
7013 sizeof(struct scsi_log_param_header);
7014 rn = wn = rb = wb = 0;
7017 for (i = 0; i < CTL_MAX_PORTS; i++) {
7018 rn += lun->stats.ports[i].operations[CTL_STATS_READ];
7019 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE];
7020 rb += lun->stats.ports[i].bytes[CTL_STATS_READ];
7021 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE];
7022 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]);
7023 bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]);
7025 scsi_u64to8b(rn, data->sap.read_num);
7026 scsi_u64to8b(wn, data->sap.write_num);
7027 if (lun->stats.blocksize > 0) {
7028 scsi_u64to8b(wb / lun->stats.blocksize,
7029 data->sap.recvieved_lba);
7030 scsi_u64to8b(rb / lun->stats.blocksize,
7031 data->sap.transmitted_lba);
7033 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000),
7034 data->sap.read_int);
7035 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000),
7036 data->sap.write_int);
7037 scsi_u64to8b(0, data->sap.weighted_num);
7038 scsi_u64to8b(0, data->sap.weighted_int);
7039 scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
7040 data->it.hdr.param_control = SLP_LBIN;
7041 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
7042 sizeof(struct scsi_log_param_header);
7044 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
7046 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
7047 data->ti.hdr.param_control = SLP_LBIN;
7048 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
7049 sizeof(struct scsi_log_param_header);
7050 scsi_ulto4b(3, data->ti.exponent);
7051 scsi_ulto4b(1, data->ti.integer);
7053 page_index->page_len = sizeof(*data);
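/*
 * [Editor's sketch, not part of the original file] The bintime
 * conversions above: struct bintime carries whole seconds plus a
 * 64-bit binary fraction of a second, so milliseconds come out as
 * sec * 1000 plus frac scaled by ~2^64/1000 (UINT64_MAX / 1000 is
 * within one part in 2^64 of that divisor).
 */
static uint64_t
example_bintime_to_ms(const struct bintime *bt)
{
	return ((uint64_t)bt->sec * 1000 + bt->frac / (UINT64_MAX / 1000));
}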
7058 ctl_log_sense(struct ctl_scsiio *ctsio)
7060 struct ctl_lun *lun;
7061 int i, pc, page_code, subpage;
7062 int alloc_len, total_len;
7063 struct ctl_page_index *page_index;
7064 struct scsi_log_sense *cdb;
7065 struct scsi_log_header *header;
7067 CTL_DEBUG_PRINT(("ctl_log_sense\n"));
7069 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7070 cdb = (struct scsi_log_sense *)ctsio->cdb;
7071 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
7072 page_code = cdb->page & SLS_PAGE_CODE;
7073 subpage = cdb->subpage;
7074 alloc_len = scsi_2btoul(cdb->length);
7077 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
7078 page_index = &lun->log_pages.index[i];
7080 /* Look for the right page code */
7081 if ((page_index->page_code & SL_PAGE_CODE) != page_code)
7084 /* Look for the right subpage or the subpage wildcard */
7085 if (page_index->subpage != subpage)
7090 if (i >= CTL_NUM_LOG_PAGES) {
7091 ctl_set_invalid_field(ctsio,
7097 ctl_done((union ctl_io *)ctsio);
7098 return (CTL_RETVAL_COMPLETE);
7101 total_len = sizeof(struct scsi_log_header) + page_index->page_len;
7103 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7104 ctsio->kern_sg_entries = 0;
7105 ctsio->kern_data_resid = 0;
7106 ctsio->kern_rel_offset = 0;
7107 if (total_len < alloc_len) {
7108 ctsio->residual = alloc_len - total_len;
7109 ctsio->kern_data_len = total_len;
7110 ctsio->kern_total_len = total_len;
7112 ctsio->residual = 0;
7113 ctsio->kern_data_len = alloc_len;
7114 ctsio->kern_total_len = alloc_len;
7117 header = (struct scsi_log_header *)ctsio->kern_data_ptr;
7118 header->page = page_index->page_code;
7119 if (page_index->subpage) {
7120 header->page |= SL_SPF;
7121 header->subpage = page_index->subpage;
7123 scsi_ulto2b(page_index->page_len, header->datalen);
7126 * Call the handler, if it exists, to update the
7127 * page to the latest values.
7129 if (page_index->sense_handler != NULL)
7130 page_index->sense_handler(ctsio, page_index, pc);
7132 memcpy(header + 1, page_index->page_data, page_index->page_len);
7134 ctl_set_success(ctsio);
7135 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7136 ctsio->be_move_done = ctl_config_move_done;
7137 ctl_datamove((union ctl_io *)ctsio);
7138 return (CTL_RETVAL_COMPLETE);
7142 ctl_read_capacity(struct ctl_scsiio *ctsio)
7144 struct scsi_read_capacity *cdb;
7145 struct scsi_read_capacity_data *data;
7146 struct ctl_lun *lun;
7149 CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
7151 cdb = (struct scsi_read_capacity *)ctsio->cdb;
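7152 /* Per SBC-2, if the PMI bit is clear, the LOGICAL BLOCK ADDRESS field must be zero. */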
7153 lba = scsi_4btoul(cdb->addr);
7154 if (((cdb->pmi & SRC_PMI) == 0)
7156 ctl_set_invalid_field(/*ctsio*/ ctsio,
7162 ctl_done((union ctl_io *)ctsio);
7163 return (CTL_RETVAL_COMPLETE);
7166 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7168 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
7169 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
7170 ctsio->residual = 0;
7171 ctsio->kern_data_len = sizeof(*data);
7172 ctsio->kern_total_len = sizeof(*data);
7173 ctsio->kern_data_resid = 0;
7174 ctsio->kern_rel_offset = 0;
7175 ctsio->kern_sg_entries = 0;
7178 * If the maximum LBA is greater than 0xfffffffe, the user must
7179 * issue a SERVICE ACTION IN (16) command, with the read capacity
7180 * service action set.
7182 if (lun->be_lun->maxlba > 0xfffffffe)
7183 scsi_ulto4b(0xffffffff, data->addr);
7185 scsi_ulto4b(lun->be_lun->maxlba, data->addr);
7188 * XXX KDM this may not be 512 bytes...
7190 scsi_ulto4b(lun->be_lun->blocksize, data->length);
7192 ctl_set_success(ctsio);
7193 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7194 ctsio->be_move_done = ctl_config_move_done;
7195 ctl_datamove((union ctl_io *)ctsio);
7196 return (CTL_RETVAL_COMPLETE);
7200 ctl_read_capacity_16(struct ctl_scsiio *ctsio)
7202 struct scsi_read_capacity_16 *cdb;
7203 struct scsi_read_capacity_data_long *data;
7204 struct ctl_lun *lun;
7208 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
7210 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
7212 alloc_len = scsi_4btoul(cdb->alloc_len);
7213 lba = scsi_8btou64(cdb->addr);
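7214 /* Per SBC-3, if the PMI bit is clear, the LOGICAL BLOCK ADDRESS field must be zero. */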
7215 if ((cdb->reladr & SRC16_PMI)
7217 ctl_set_invalid_field(/*ctsio*/ ctsio,
7223 ctl_done((union ctl_io *)ctsio);
7224 return (CTL_RETVAL_COMPLETE);
7227 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7229 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
7230 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
7232 if (sizeof(*data) < alloc_len) {
7233 ctsio->residual = alloc_len - sizeof(*data);
7234 ctsio->kern_data_len = sizeof(*data);
7235 ctsio->kern_total_len = sizeof(*data);
7237 ctsio->residual = 0;
7238 ctsio->kern_data_len = alloc_len;
7239 ctsio->kern_total_len = alloc_len;
7241 ctsio->kern_data_resid = 0;
7242 ctsio->kern_rel_offset = 0;
7243 ctsio->kern_sg_entries = 0;
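7244 /* Report the last LBA, the block size, the physical block geometry, and the logical block provisioning (LBPME/LBPRZ) bits. */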
7245 scsi_u64to8b(lun->be_lun->maxlba, data->addr);
7246 /* XXX KDM this may not be 512 bytes... */
7247 scsi_ulto4b(lun->be_lun->blocksize, data->length);
7248 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
7249 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp);
7250 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
7251 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;
7253 ctl_set_success(ctsio);
7254 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7255 ctsio->be_move_done = ctl_config_move_done;
7256 ctl_datamove((union ctl_io *)ctsio);
7257 return (CTL_RETVAL_COMPLETE);
7261 ctl_get_lba_status(struct ctl_scsiio *ctsio)
7263 struct scsi_get_lba_status *cdb;
7264 struct scsi_get_lba_status_data *data;
7265 struct ctl_lun *lun;
7266 struct ctl_lba_len_flags *lbalen;
7268 uint32_t alloc_len, total_len;
7271 CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));
7273 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7274 cdb = (struct scsi_get_lba_status *)ctsio->cdb;
7275 lba = scsi_8btou64(cdb->addr);
7276 alloc_len = scsi_4btoul(cdb->alloc_len);
7278 if (lba > lun->be_lun->maxlba) {
7279 ctl_set_lba_out_of_range(ctsio);
7280 ctl_done((union ctl_io *)ctsio);
7281 return (CTL_RETVAL_COMPLETE);
7284 total_len = sizeof(*data) + sizeof(data->descr[0]);
7285 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7286 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;
7288 if (total_len < alloc_len) {
7289 ctsio->residual = alloc_len - total_len;
7290 ctsio->kern_data_len = total_len;
7291 ctsio->kern_total_len = total_len;
7293 ctsio->residual = 0;
7294 ctsio->kern_data_len = alloc_len;
7295 ctsio->kern_total_len = alloc_len;
7297 ctsio->kern_data_resid = 0;
7298 ctsio->kern_rel_offset = 0;
7299 ctsio->kern_sg_entries = 0;
7301 /* Fill in dummy data in case the backend can't report anything. */
7302 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
7303 scsi_u64to8b(lba, data->descr[0].addr);
7304 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba),
7305 data->descr[0].length);
7306 data->descr[0].status = 0; /* Mapped or unknown. */
7308 ctl_set_success(ctsio);
7309 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7310 ctsio->be_move_done = ctl_config_move_done;
7312 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
7314 lbalen->len = total_len;
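7315 /* Hand the request to the backend to fill in the real LBA status; it will complete the I/O. */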
7316 retval = lun->backend->config_read((union ctl_io *)ctsio);
7317 return (CTL_RETVAL_COMPLETE);
7321 ctl_read_defect(struct ctl_scsiio *ctsio)
7323 struct scsi_read_defect_data_10 *ccb10;
7324 struct scsi_read_defect_data_12 *ccb12;
7325 struct scsi_read_defect_data_hdr_10 *data10;
7326 struct scsi_read_defect_data_hdr_12 *data12;
7327 uint32_t alloc_len, data_len;
7330 CTL_DEBUG_PRINT(("ctl_read_defect\n"));
7332 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
7333 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb;
7334 format = ccb10->format;
7335 alloc_len = scsi_2btoul(ccb10->alloc_length);
7336 data_len = sizeof(*data10);
7338 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb;
7339 format = ccb12->format;
7340 alloc_len = scsi_4btoul(ccb12->alloc_length);
7341 data_len = sizeof(*data12);
7343 if (alloc_len == 0) {
7344 ctl_set_success(ctsio);
7345 ctl_done((union ctl_io *)ctsio);
7346 return (CTL_RETVAL_COMPLETE);
7349 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
7350 if (data_len < alloc_len) {
7351 ctsio->residual = alloc_len - data_len;
7352 ctsio->kern_data_len = data_len;
7353 ctsio->kern_total_len = data_len;
7355 ctsio->residual = 0;
7356 ctsio->kern_data_len = alloc_len;
7357 ctsio->kern_total_len = alloc_len;
7359 ctsio->kern_data_resid = 0;
7360 ctsio->kern_rel_offset = 0;
7361 ctsio->kern_sg_entries = 0;
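7362 /* We keep no defect list, so return an empty header in the requested format. */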
7363 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
7364 data10 = (struct scsi_read_defect_data_hdr_10 *)
7365 ctsio->kern_data_ptr;
7366 data10->format = format;
7367 scsi_ulto2b(0, data10->length);
7369 data12 = (struct scsi_read_defect_data_hdr_12 *)
7370 ctsio->kern_data_ptr;
7371 data12->format = format;
7372 scsi_ulto2b(0, data12->generation);
7373 scsi_ulto4b(0, data12->length);
7376 ctl_set_success(ctsio);
7377 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7378 ctsio->be_move_done = ctl_config_move_done;
7379 ctl_datamove((union ctl_io *)ctsio);
7380 return (CTL_RETVAL_COMPLETE);
7384 ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
7386 struct scsi_maintenance_in *cdb;
7388 int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os;
7389 int num_target_port_groups, num_target_ports;
7390 struct ctl_lun *lun;
7391 struct ctl_softc *softc;
7392 struct ctl_port *port;
7393 struct scsi_target_group_data *rtg_ptr;
7394 struct scsi_target_group_data_extended *rtg_ext_ptr;
7395 struct scsi_target_port_group_descriptor *tpg_desc;
7397 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
7399 cdb = (struct scsi_maintenance_in *)ctsio->cdb;
7400 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7401 softc = lun->ctl_softc;
7403 retval = CTL_RETVAL_COMPLETE;
7405 switch (cdb->byte2 & STG_PDF_MASK) {
7406 case STG_PDF_LENGTH:
7409 case STG_PDF_EXTENDED:
7413 ctl_set_invalid_field(/*ctsio*/ ctsio,
7419 ctl_done((union ctl_io *)ctsio);
7423 if (softc->is_single)
7424 num_target_port_groups = 1;
7426 num_target_port_groups = NUM_TARGET_PORT_GROUPS;
7427 num_target_ports = 0;
7428 mtx_lock(&softc->ctl_lock);
7429 STAILQ_FOREACH(port, &softc->port_list, links) {
7430 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7432 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
7436 mtx_unlock(&softc->ctl_lock);
7439 total_len = sizeof(struct scsi_target_group_data_extended);
7441 total_len = sizeof(struct scsi_target_group_data);
7442 total_len += sizeof(struct scsi_target_port_group_descriptor) *
7443 num_target_port_groups +
7444 sizeof(struct scsi_target_port_descriptor) *
7445 num_target_ports * num_target_port_groups;
7447 alloc_len = scsi_4btoul(cdb->length);
7449 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7451 ctsio->kern_sg_entries = 0;
7453 if (total_len < alloc_len) {
7454 ctsio->residual = alloc_len - total_len;
7455 ctsio->kern_data_len = total_len;
7456 ctsio->kern_total_len = total_len;
7458 ctsio->residual = 0;
7459 ctsio->kern_data_len = alloc_len;
7460 ctsio->kern_total_len = alloc_len;
7462 ctsio->kern_data_resid = 0;
7463 ctsio->kern_rel_offset = 0;
7466 rtg_ext_ptr = (struct scsi_target_group_data_extended *)
7467 ctsio->kern_data_ptr;
7468 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
7469 rtg_ext_ptr->format_type = 0x10;
7470 rtg_ext_ptr->implicit_transition_time = 0;
7471 tpg_desc = &rtg_ext_ptr->groups[0];
7473 rtg_ptr = (struct scsi_target_group_data *)
7474 ctsio->kern_data_ptr;
7475 scsi_ulto4b(total_len - 4, rtg_ptr->length);
7476 tpg_desc = &rtg_ptr->groups[0];
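7478 /* Compute this SC's target port group index (pg) and the asymmetric access states to report for it (gs) and for the other group (os). */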
7479 mtx_lock(&softc->ctl_lock);
7480 pg = softc->port_offset / CTL_MAX_PORTS;
7481 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) {
7482 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) {
7483 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7484 os = TPG_ASYMMETRIC_ACCESS_STANDBY;
7485 } else if (lun->flags & CTL_LUN_PRIMARY_SC) {
7486 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7487 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7489 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
7490 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
7493 gs = TPG_ASYMMETRIC_ACCESS_STANDBY;
7494 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
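7495 /* Emit one descriptor per target port group, listing every online port mapped to this LUN. */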
7496 for (g = 0; g < num_target_port_groups; g++) {
7497 tpg_desc->pref_state = (g == pg) ? gs : os;
7498 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP;
7499 scsi_ulto2b(g + 1, tpg_desc->target_port_group);
7500 tpg_desc->status = TPG_IMPLICIT;
7502 STAILQ_FOREACH(port, &softc->port_list, links) {
7503 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
7505 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
7507 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
7508 scsi_ulto2b(p, tpg_desc->descriptors[pc].
7509 relative_target_port_identifier);
7512 tpg_desc->target_port_count = pc;
7513 tpg_desc = (struct scsi_target_port_group_descriptor *)
7514 &tpg_desc->descriptors[pc];
7516 mtx_unlock(&softc->ctl_lock);
7518 ctl_set_success(ctsio);
7519 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7520 ctsio->be_move_done = ctl_config_move_done;
7521 ctl_datamove((union ctl_io *)ctsio);
7526 ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
7528 struct ctl_lun *lun;
7529 struct scsi_report_supported_opcodes *cdb;
7530 const struct ctl_cmd_entry *entry, *sentry;
7531 struct scsi_report_supported_opcodes_all *all;
7532 struct scsi_report_supported_opcodes_descr *descr;
7533 struct scsi_report_supported_opcodes_one *one;
7535 int alloc_len, total_len;
7536 int opcode, service_action, i, j, num;
7538 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
7540 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
7541 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7543 retval = CTL_RETVAL_COMPLETE;
7545 opcode = cdb->requested_opcode;
7546 service_action = scsi_2btoul(cdb->requested_service_action);
7547 switch (cdb->options & RSO_OPTIONS_MASK) {
7548 case RSO_OPTIONS_ALL:
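7549 /* First pass: count the opcodes (and each service action of service-action-multiplexed opcodes) applicable to this LUN type, to size the response. */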
7550 for (i = 0; i < 256; i++) {
7551 entry = &ctl_cmd_table[i];
7552 if (entry->flags & CTL_CMD_FLAG_SA5) {
7553 for (j = 0; j < 32; j++) {
7554 sentry = &((const struct ctl_cmd_entry *)
7556 if (ctl_cmd_applicable(
7557 lun->be_lun->lun_type, sentry))
7561 if (ctl_cmd_applicable(lun->be_lun->lun_type,
7566 total_len = sizeof(struct scsi_report_supported_opcodes_all) +
7567 num * sizeof(struct scsi_report_supported_opcodes_descr);
7569 case RSO_OPTIONS_OC:
7570 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
7571 ctl_set_invalid_field(/*ctsio*/ ctsio,
7577 ctl_done((union ctl_io *)ctsio);
7578 return (CTL_RETVAL_COMPLETE);
7580 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7582 case RSO_OPTIONS_OC_SA:
7583 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
7584 service_action >= 32) {
7585 ctl_set_invalid_field(/*ctsio*/ ctsio,
7591 ctl_done((union ctl_io *)ctsio);
7592 return (CTL_RETVAL_COMPLETE);
7594 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
7597 ctl_set_invalid_field(/*ctsio*/ ctsio,
7603 ctl_done((union ctl_io *)ctsio);
7604 return (CTL_RETVAL_COMPLETE);
7607 alloc_len = scsi_4btoul(cdb->length);
7609 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7611 ctsio->kern_sg_entries = 0;
7613 if (total_len < alloc_len) {
7614 ctsio->residual = alloc_len - total_len;
7615 ctsio->kern_data_len = total_len;
7616 ctsio->kern_total_len = total_len;
7618 ctsio->residual = 0;
7619 ctsio->kern_data_len = alloc_len;
7620 ctsio->kern_total_len = alloc_len;
7622 ctsio->kern_data_resid = 0;
7623 ctsio->kern_rel_offset = 0;
7625 switch (cdb->options & RSO_OPTIONS_MASK) {
7626 case RSO_OPTIONS_ALL:
7627 all = (struct scsi_report_supported_opcodes_all *)
7628 ctsio->kern_data_ptr;
7630 for (i = 0; i < 256; i++) {
7631 entry = &ctl_cmd_table[i];
7632 if (entry->flags & CTL_CMD_FLAG_SA5) {
7633 for (j = 0; j < 32; j++) {
7634 sentry = &((const struct ctl_cmd_entry *)
7636 if (!ctl_cmd_applicable(
7637 lun->be_lun->lun_type, sentry))
7639 descr = &all->descr[num++];
7641 scsi_ulto2b(j, descr->service_action);
7642 descr->flags = RSO_SERVACTV;
7643 scsi_ulto2b(sentry->length,
7647 if (!ctl_cmd_applicable(lun->be_lun->lun_type,
7650 descr = &all->descr[num++];
7652 scsi_ulto2b(0, descr->service_action);
7654 scsi_ulto2b(entry->length, descr->cdb_length);
7658 num * sizeof(struct scsi_report_supported_opcodes_descr),
7661 case RSO_OPTIONS_OC:
7662 one = (struct scsi_report_supported_opcodes_one *)
7663 ctsio->kern_data_ptr;
7664 entry = &ctl_cmd_table[opcode];
7666 case RSO_OPTIONS_OC_SA:
7667 one = (struct scsi_report_supported_opcodes_one *)
7668 ctsio->kern_data_ptr;
7669 entry = &ctl_cmd_table[opcode];
7670 entry = &((const struct ctl_cmd_entry *)
7671 entry->execute)[service_action];
7673 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
7675 scsi_ulto2b(entry->length, one->cdb_length);
7676 one->cdb_usage[0] = opcode;
7677 memcpy(&one->cdb_usage[1], entry->usage,
7684 ctl_set_success(ctsio);
7685 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7686 ctsio->be_move_done = ctl_config_move_done;
7687 ctl_datamove((union ctl_io *)ctsio);
7692 ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
7694 struct scsi_report_supported_tmf *cdb;
7695 struct scsi_report_supported_tmf_data *data;
7697 int alloc_len, total_len;
7699 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));
7701 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;
7703 retval = CTL_RETVAL_COMPLETE;
7705 total_len = sizeof(struct scsi_report_supported_tmf_data);
7706 alloc_len = scsi_4btoul(cdb->length);
7708 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7710 ctsio->kern_sg_entries = 0;
7712 if (total_len < alloc_len) {
7713 ctsio->residual = alloc_len - total_len;
7714 ctsio->kern_data_len = total_len;
7715 ctsio->kern_total_len = total_len;
7717 ctsio->residual = 0;
7718 ctsio->kern_data_len = alloc_len;
7719 ctsio->kern_total_len = alloc_len;
7721 ctsio->kern_data_resid = 0;
7722 ctsio->kern_rel_offset = 0;
7724 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
7725 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS;
7726 data->byte2 |= RST_ITNRS;
7728 ctl_set_success(ctsio);
7729 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7730 ctsio->be_move_done = ctl_config_move_done;
7731 ctl_datamove((union ctl_io *)ctsio);
7736 ctl_report_timestamp(struct ctl_scsiio *ctsio)
7738 struct scsi_report_timestamp *cdb;
7739 struct scsi_report_timestamp_data *data;
7743 int alloc_len, total_len;
7745 CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));
7747 cdb = (struct scsi_report_timestamp *)ctsio->cdb;
7749 retval = CTL_RETVAL_COMPLETE;
7751 total_len = sizeof(struct scsi_report_timestamp_data);
7752 alloc_len = scsi_4btoul(cdb->length);
7754 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7756 ctsio->kern_sg_entries = 0;
7758 if (total_len < alloc_len) {
7759 ctsio->residual = alloc_len - total_len;
7760 ctsio->kern_data_len = total_len;
7761 ctsio->kern_total_len = total_len;
7763 ctsio->residual = 0;
7764 ctsio->kern_data_len = alloc_len;
7765 ctsio->kern_total_len = alloc_len;
7767 ctsio->kern_data_resid = 0;
7768 ctsio->kern_rel_offset = 0;
7770 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
7771 scsi_ulto2b(sizeof(*data) - 2, data->length);
7772 data->origin = RTS_ORIG_OUTSIDE;
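7773 /* The returned TIMESTAMP is 48 bits of milliseconds: the upper 32 bits are stored first, then the lower 16. */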
7774 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
7775 scsi_ulto4b(timestamp >> 16, data->timestamp);
7776 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);
7778 ctl_set_success(ctsio);
7779 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7780 ctsio->be_move_done = ctl_config_move_done;
7781 ctl_datamove((union ctl_io *)ctsio);
7786 ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
7788 struct scsi_per_res_in *cdb;
7789 int alloc_len, total_len = 0;
7790 /* struct scsi_per_res_in_rsrv in_data; */
7791 struct ctl_lun *lun;
7792 struct ctl_softc *softc;
7795 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
7797 cdb = (struct scsi_per_res_in *)ctsio->cdb;
7799 alloc_len = scsi_2btoul(cdb->length);
7801 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7802 softc = lun->ctl_softc;
7805 mtx_lock(&lun->lun_lock);
7806 switch (cdb->action) {
7807 case SPRI_RK: /* read keys */
7808 total_len = sizeof(struct scsi_per_res_in_keys) +
7810 sizeof(struct scsi_per_res_key);
7812 case SPRI_RR: /* read reservation */
7813 if (lun->flags & CTL_LUN_PR_RESERVED)
7814 total_len = sizeof(struct scsi_per_res_in_rsrv);
7816 total_len = sizeof(struct scsi_per_res_in_header);
7818 case SPRI_RC: /* report capabilities */
7819 total_len = sizeof(struct scsi_per_res_cap);
7821 case SPRI_RS: /* read full status */
7822 total_len = sizeof(struct scsi_per_res_in_header) +
7823 (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7827 panic("Invalid PR action %x", cdb->action);
7829 mtx_unlock(&lun->lun_lock);
7831 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
7833 if (total_len < alloc_len) {
7834 ctsio->residual = alloc_len - total_len;
7835 ctsio->kern_data_len = total_len;
7836 ctsio->kern_total_len = total_len;
7838 ctsio->residual = 0;
7839 ctsio->kern_data_len = alloc_len;
7840 ctsio->kern_total_len = alloc_len;
7843 ctsio->kern_data_resid = 0;
7844 ctsio->kern_rel_offset = 0;
7845 ctsio->kern_sg_entries = 0;
7847 mtx_lock(&lun->lun_lock);
7848 switch (cdb->action) {
7849 case SPRI_RK: { /* read keys */
7850 struct scsi_per_res_in_keys *res_keys;
7853 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;
7856 * We had to drop the lock to allocate our buffer, which
7857 * leaves time for someone to come in with another
7858 * persistent reservation. (That is unlikely, though,
7859 * since this should be the only persistent reservation
7860 * command active right now.)
7862 if (total_len != (sizeof(struct scsi_per_res_in_keys) +
7863 (lun->pr_key_count *
7864 sizeof(struct scsi_per_res_key)))){
7865 mtx_unlock(&lun->lun_lock);
7866 free(ctsio->kern_data_ptr, M_CTL);
7867 printf("%s: reservation length changed, retrying\n",
7872 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation);
7874 scsi_ulto4b(sizeof(struct scsi_per_res_key) *
7875 lun->pr_key_count, res_keys->header.length);
7877 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) {
7878 if ((key = ctl_get_prkey(lun, i)) == 0)
7882 * We used lun->pr_key_count to calculate the
7883 * size to allocate. If it turns out the number of
7884 * initiators with the registered flag set is
7885 * larger than that (i.e. they haven't been kept in
7886 * sync), we've got a problem.
7888 if (key_count >= lun->pr_key_count) {
7890 csevent_log(CSC_CTL | CSC_SHELF_SW |
7892 csevent_LogType_Fault,
7893 csevent_AlertLevel_Yellow,
7894 csevent_FRU_ShelfController,
7895 csevent_FRU_Firmware,
7896 csevent_FRU_Unknown,
7897 "registered keys %d >= key "
7898 "count %d", key_count,
7904 scsi_u64to8b(key, res_keys->keys[key_count].key);
7909 case SPRI_RR: { /* read reservation */
7910 struct scsi_per_res_in_rsrv *res;
7911 int tmp_len, header_only;
7913 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;
7915 scsi_ulto4b(lun->PRGeneration, res->header.generation);
7917 if (lun->flags & CTL_LUN_PR_RESERVED)
7919 tmp_len = sizeof(struct scsi_per_res_in_rsrv);
7920 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
7921 res->header.length);
7924 tmp_len = sizeof(struct scsi_per_res_in_header);
7925 scsi_ulto4b(0, res->header.length);
7930 * We had to drop the lock to allocate our buffer, which
7931 * leaves time for someone to come in with another
7932 * persistent reservation. (That is unlikely, though,
7933 * since this should be the only persistent reservation
7934 * command active right now.)
7936 if (tmp_len != total_len) {
7937 mtx_unlock(&lun->lun_lock);
7938 free(ctsio->kern_data_ptr, M_CTL);
7939 printf("%s: reservation status changed, retrying\n",
7945 * No reservation held, so we're done.
7947 if (header_only != 0)
7951 * If the registration is an All Registrants type, the key
7952 * is 0, since it doesn't really matter.
7954 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
7955 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx),
7956 res->data.reservation);
7958 res->data.scopetype = lun->res_type;
7961 case SPRI_RC: /* report capabilities */
7963 struct scsi_per_res_cap *res_cap;
7966 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
7967 scsi_ulto2b(sizeof(*res_cap), res_cap->length);
7968 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5;
7969 type_mask = SPRI_TM_WR_EX_AR |
7975 scsi_ulto2b(type_mask, res_cap->type_mask);
7978 case SPRI_RS: { /* read full status */
7979 struct scsi_per_res_in_full *res_status;
7980 struct scsi_per_res_in_full_desc *res_desc;
7981 struct ctl_port *port;
7984 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;
7987 * We had to drop the lock to allocate our buffer, which
7988 * leaves time for someone to come in with another
7989 * persistent reservation. (That is unlikely, though,
7990 * since this should be the only persistent reservation
7991 * command active right now.)
7993 if (total_len < (sizeof(struct scsi_per_res_in_header) +
7994 (sizeof(struct scsi_per_res_in_full_desc) + 256) *
7995 lun->pr_key_count)){
7996 mtx_unlock(&lun->lun_lock);
7997 free(ctsio->kern_data_ptr, M_CTL);
7998 printf("%s: reservation length changed, retrying\n",
8003 scsi_ulto4b(lun->PRGeneration, res_status->header.generation);
8005 res_desc = &res_status->desc[0];
8006 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) {
8007 if ((key = ctl_get_prkey(lun, i)) == 0)
8010 scsi_u64to8b(key, res_desc->res_key.key);
8011 if ((lun->flags & CTL_LUN_PR_RESERVED) &&
8012 (lun->pr_res_idx == i ||
8013 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
8014 res_desc->flags = SPRI_FULL_R_HOLDER;
8015 res_desc->scopetype = lun->res_type;
8017 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
8018 res_desc->rel_trgt_port_id);
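8019 /* Append this registrant's TransportID; the descriptors are variable length. */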
8020 port = softc->ctl_ports[
8021 ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)];
8023 len = ctl_create_iid(port,
8024 i % CTL_MAX_INIT_PER_PORT,
8025 res_desc->transport_id);
8026 scsi_ulto4b(len, res_desc->additional_length);
8027 res_desc = (struct scsi_per_res_in_full_desc *)
8028 &res_desc->transport_id[len];
8030 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
8031 res_status->header.length);
8036 * This is a bug, because we just checked for this above,
8037 * and should have returned an error.
8039 panic("Invalid PR action %x", cdb->action);
8040 break; /* NOTREACHED */
8042 mtx_unlock(&lun->lun_lock);
8044 ctl_set_success(ctsio);
8045 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
8046 ctsio->be_move_done = ctl_config_move_done;
8047 ctl_datamove((union ctl_io *)ctsio);
8048 return (CTL_RETVAL_COMPLETE);
8052 ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua)
8054 int off = lun->ctl_softc->persis_offset;
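8055 /* Establish the UA only if the registrant index falls within this SC's range of initiator indexes. */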
8056 if (residx >= off && residx < off + CTL_MAX_INITIATORS)
8057 ctl_est_ua(lun, residx - off, ua);
8061 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
8065 ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key,
8066 uint64_t sa_res_key, uint8_t type, uint32_t residx,
8067 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb,
8068 struct scsi_per_res_out_parms* param)
8070 union ctl_ha_msg persis_io;
8076 mtx_lock(&lun->lun_lock);
8077 if (sa_res_key == 0) {
8078 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8079 /* validate scope and type */
8080 if ((cdb->scope_type & SPR_SCOPE_MASK) !=
8082 mtx_unlock(&lun->lun_lock);
8083 ctl_set_invalid_field(/*ctsio*/ ctsio,
8089 ctl_done((union ctl_io *)ctsio);
8093 if (type > 8 || type == 2 || type == 4 || type == 0) {
8094 mtx_unlock(&lun->lun_lock);
8095 ctl_set_invalid_field(/*ctsio*/ ctsio,
8101 ctl_done((union ctl_io *)ctsio);
8106 * Unregister everybody else and build UA for
8109 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++) {
8110 if (i == residx || ctl_get_prkey(lun, i) == 0)
8113 ctl_clr_prkey(lun, i);
8114 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
8116 lun->pr_key_count = 1;
8117 lun->res_type = type;
8118 if (lun->res_type != SPR_TYPE_WR_EX_AR
8119 && lun->res_type != SPR_TYPE_EX_AC_AR)
8120 lun->pr_res_idx = residx;
8122 /* send msg to other side */
8123 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8124 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8125 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8126 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8127 persis_io.pr.pr_info.res_type = type;
8128 memcpy(persis_io.pr.pr_info.sa_res_key,
8129 param->serv_act_res_key,
8130 sizeof(param->serv_act_res_key));
8131 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8132 &persis_io, sizeof(persis_io), 0)) >
8133 CTL_HA_STATUS_SUCCESS) {
8134 printf("CTL:Persis Out error returned "
8135 "from ctl_ha_msg_send %d\n",
8139 /* not all registrants */
8140 mtx_unlock(&lun->lun_lock);
8141 free(ctsio->kern_data_ptr, M_CTL);
8142 ctl_set_invalid_field(ctsio,
8148 ctl_done((union ctl_io *)ctsio);
8151 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
8152 || !(lun->flags & CTL_LUN_PR_RESERVED)) {
8155 if (res_key == sa_res_key) {
8158 * The spec implies this is not good but doesn't
8159 * say what to do. There are two choices: either
8160 * generate a reservation conflict, or a check
8161 * condition with illegal field in parameter data.
8162 * Since the latter is what is done when the
8163 * sa_res_key is zero, take the same approach here,
8164 * since this also concerns the sa_res_key.
8166 mtx_unlock(&lun->lun_lock);
8167 free(ctsio->kern_data_ptr, M_CTL);
8168 ctl_set_invalid_field(ctsio,
8174 ctl_done((union ctl_io *)ctsio);
8178 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++) {
8179 if (ctl_get_prkey(lun, i) != sa_res_key)
8183 ctl_clr_prkey(lun, i);
8184 lun->pr_key_count--;
8185 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
8188 mtx_unlock(&lun->lun_lock);
8189 free(ctsio->kern_data_ptr, M_CTL);
8190 ctl_set_reservation_conflict(ctsio);
8191 ctl_done((union ctl_io *)ctsio);
8192 return (CTL_RETVAL_COMPLETE);
8194 /* send msg to other side */
8195 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8196 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8197 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8198 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8199 persis_io.pr.pr_info.res_type = type;
8200 memcpy(persis_io.pr.pr_info.sa_res_key,
8201 param->serv_act_res_key,
8202 sizeof(param->serv_act_res_key));
8203 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8204 &persis_io, sizeof(persis_io), 0)) >
8205 CTL_HA_STATUS_SUCCESS) {
8206 printf("CTL:Persis Out error returned from "
8207 "ctl_ha_msg_send %d\n", isc_retval);
8210 /* Reserved but not all registrants */
8211 /* sa_res_key is res holder */
8212 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) {
8213 /* validate scope and type */
8214 if ((cdb->scope_type & SPR_SCOPE_MASK) !=
8216 mtx_unlock(&lun->lun_lock);
8217 ctl_set_invalid_field(/*ctsio*/ ctsio,
8223 ctl_done((union ctl_io *)ctsio);
8227 if (type > 8 || type == 2 || type == 4 || type == 0) {
8228 mtx_unlock(&lun->lun_lock);
8229 ctl_set_invalid_field(/*ctsio*/ ctsio,
8235 ctl_done((union ctl_io *)ctsio);
8241 * If sa_res_key != res_key, remove all
8242 * registrants with sa_res_key and generate a UA
8243 * (Registrations Preempted) for those
8244 * registrants. If it wasn't an exclusive
8245 * reservation, generate a UA (Reservations
8246 * Preempted) for all other registered nexuses
8247 * if the type has changed. Establish the new
8248 * reservation and holder. If res_key and
8249 * sa_res_key are the same, do the above
8250 * except don't unregister the res holder.
8253 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++) {
8254 if (i == residx || ctl_get_prkey(lun, i) == 0)
8257 if (sa_res_key == ctl_get_prkey(lun, i)) {
8258 ctl_clr_prkey(lun, i);
8259 lun->pr_key_count--;
8260 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
8261 } else if (type != lun->res_type
8262 && (lun->res_type == SPR_TYPE_WR_EX_RO
8263 || lun->res_type ==SPR_TYPE_EX_AC_RO)){
8264 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE);
8267 lun->res_type = type;
8268 if (lun->res_type != SPR_TYPE_WR_EX_AR
8269 && lun->res_type != SPR_TYPE_EX_AC_AR)
8270 lun->pr_res_idx = residx;
8272 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8274 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8275 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8276 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8277 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8278 persis_io.pr.pr_info.res_type = type;
8279 memcpy(persis_io.pr.pr_info.sa_res_key,
8280 param->serv_act_res_key,
8281 sizeof(param->serv_act_res_key));
8282 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8283 &persis_io, sizeof(persis_io), 0)) >
8284 CTL_HA_STATUS_SUCCESS) {
8285 printf("CTL:Persis Out error returned "
8286 "from ctl_ha_msg_send %d\n",
8291 * sa_res_key is not the res holder; just
8292 * remove the registrants.
8296 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++) {
8297 if (sa_res_key != ctl_get_prkey(lun, i))
8301 ctl_clr_prkey(lun, i);
8302 lun->pr_key_count--;
8303 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
8307 mtx_unlock(&lun->lun_lock);
8308 free(ctsio->kern_data_ptr, M_CTL);
8309 ctl_set_reservation_conflict(ctsio);
8310 ctl_done((union ctl_io *)ctsio);
8313 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8314 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8315 persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
8316 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8317 persis_io.pr.pr_info.res_type = type;
8318 memcpy(persis_io.pr.pr_info.sa_res_key,
8319 param->serv_act_res_key,
8320 sizeof(param->serv_act_res_key));
8321 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8322 &persis_io, sizeof(persis_io), 0)) >
8323 CTL_HA_STATUS_SUCCESS) {
8324 printf("CTL:Persis Out error returned "
8325 "from ctl_ha_msg_send %d\n",
8331 lun->PRGeneration++;
8332 mtx_unlock(&lun->lun_lock);
8338 ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg)
8340 uint64_t sa_res_key;
8343 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key);
8345 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS
8346 || lun->pr_res_idx == CTL_PR_NO_RESERVATION
8347 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) {
8348 if (sa_res_key == 0) {
8350 * Unregister everybody else and build UA for
8353 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++) {
8354 if (i == msg->pr.pr_info.residx ||
8355 ctl_get_prkey(lun, i) == 0)
8358 ctl_clr_prkey(lun, i);
8359 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
8362 lun->pr_key_count = 1;
8363 lun->res_type = msg->pr.pr_info.res_type;
8364 if (lun->res_type != SPR_TYPE_WR_EX_AR
8365 && lun->res_type != SPR_TYPE_EX_AC_AR)
8366 lun->pr_res_idx = msg->pr.pr_info.residx;
8368 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++) {
8369 if (sa_res_key == ctl_get_prkey(lun, i))
8372 ctl_clr_prkey(lun, i);
8373 lun->pr_key_count--;
8374 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
8378 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++) {
8379 if (i == msg->pr.pr_info.residx ||
8380 ctl_get_prkey(lun, i) == 0)
8383 if (sa_res_key == ctl_get_prkey(lun, i)) {
8384 ctl_clr_prkey(lun, i);
8385 lun->pr_key_count--;
8386 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
8387 } else if (msg->pr.pr_info.res_type != lun->res_type
8388 && (lun->res_type == SPR_TYPE_WR_EX_RO
8389 || lun->res_type == SPR_TYPE_EX_AC_RO)) {
8390 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE);
8393 lun->res_type = msg->pr.pr_info.res_type;
8394 if (lun->res_type != SPR_TYPE_WR_EX_AR
8395 && lun->res_type != SPR_TYPE_EX_AC_AR)
8396 lun->pr_res_idx = msg->pr.pr_info.residx;
8398 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8400 lun->PRGeneration++;
8406 ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
8410 u_int32_t param_len;
8411 struct scsi_per_res_out *cdb;
8412 struct ctl_lun *lun;
8413 struct scsi_per_res_out_parms* param;
8414 struct ctl_softc *softc;
8416 uint64_t res_key, sa_res_key, key;
8418 union ctl_ha_msg persis_io;
8421 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
8423 retval = CTL_RETVAL_COMPLETE;
8425 cdb = (struct scsi_per_res_out *)ctsio->cdb;
8426 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8427 softc = lun->ctl_softc;
8430 * We only support whole-LUN scope. The scope & type are ignored for
8431 * register, register-and-ignore-existing-key, and clear.
8432 * We sometimes ignore scope and type on preempts, too.
8433 * Verify the reservation type here as well.
8435 type = cdb->scope_type & SPR_TYPE_MASK;
8436 if ((cdb->action == SPRO_RESERVE)
8437 || (cdb->action == SPRO_RELEASE)) {
8438 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
8439 ctl_set_invalid_field(/*ctsio*/ ctsio,
8445 ctl_done((union ctl_io *)ctsio);
8446 return (CTL_RETVAL_COMPLETE);
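8448 /* Types 1, 3, 5, 6, 7 and 8 are the valid persistent reservation types; 0, 2, 4 and anything above 8 are reserved (SPC-3). */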
8449 if (type > 8 || type == 2 || type == 4 || type == 0) {
8450 ctl_set_invalid_field(/*ctsio*/ ctsio,
8456 ctl_done((union ctl_io *)ctsio);
8457 return (CTL_RETVAL_COMPLETE);
8461 param_len = scsi_4btoul(cdb->length);
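8462 /* On first entry, fetch the parameter list from the initiator; we re-enter here once the data-out phase completes. */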
8463 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
8464 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
8465 ctsio->kern_data_len = param_len;
8466 ctsio->kern_total_len = param_len;
8467 ctsio->kern_data_resid = 0;
8468 ctsio->kern_rel_offset = 0;
8469 ctsio->kern_sg_entries = 0;
8470 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
8471 ctsio->be_move_done = ctl_config_move_done;
8472 ctl_datamove((union ctl_io *)ctsio);
8474 return (CTL_RETVAL_COMPLETE);
8477 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;
8479 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
8480 res_key = scsi_8btou64(param->res_key.key);
8481 sa_res_key = scsi_8btou64(param->serv_act_res_key);
8484 * Validate the reservation key here except for SPRO_REG_IGNO
8485 * This must be done for all other service actions
8487 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
8488 mtx_lock(&lun->lun_lock);
8489 if ((key = ctl_get_prkey(lun, residx)) != 0) {
8490 if (res_key != key) {
8492 * The current key passed in doesn't match
8493 * the one the initiator previously
8496 mtx_unlock(&lun->lun_lock);
8497 free(ctsio->kern_data_ptr, M_CTL);
8498 ctl_set_reservation_conflict(ctsio);
8499 ctl_done((union ctl_io *)ctsio);
8500 return (CTL_RETVAL_COMPLETE);
8502 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
8504 * We are not registered
8506 mtx_unlock(&lun->lun_lock);
8507 free(ctsio->kern_data_ptr, M_CTL);
8508 ctl_set_reservation_conflict(ctsio);
8509 ctl_done((union ctl_io *)ctsio);
8510 return (CTL_RETVAL_COMPLETE);
8511 } else if (res_key != 0) {
8513 * We are not registered and trying to register but
8514 * the register key isn't zero.
8516 mtx_unlock(&lun->lun_lock);
8517 free(ctsio->kern_data_ptr, M_CTL);
8518 ctl_set_reservation_conflict(ctsio);
8519 ctl_done((union ctl_io *)ctsio);
8520 return (CTL_RETVAL_COMPLETE);
8522 mtx_unlock(&lun->lun_lock);
8525 switch (cdb->action & SPRO_ACTION_MASK) {
8527 case SPRO_REG_IGNO: {
8530 printf("Registration received\n");
8534 * We don't support any of these options, as we report in
8535 * the read capabilities request (see
8536 * ctl_persistent_reserve_in(), above).
8538 if ((param->flags & SPR_SPEC_I_PT)
8539 || (param->flags & SPR_ALL_TG_PT)
8540 || (param->flags & SPR_APTPL)) {
8543 if (param->flags & SPR_APTPL)
8545 else if (param->flags & SPR_ALL_TG_PT)
8547 else /* SPR_SPEC_I_PT */
8550 free(ctsio->kern_data_ptr, M_CTL);
8551 ctl_set_invalid_field(ctsio,
8557 ctl_done((union ctl_io *)ctsio);
8558 return (CTL_RETVAL_COMPLETE);
8561 mtx_lock(&lun->lun_lock);
8564 * The initiator wants to clear the key, i.e. unregister.
8567 if (sa_res_key == 0) {
8569 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
8570 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
8571 && ctl_get_prkey(lun, residx) == 0)) {
8572 mtx_unlock(&lun->lun_lock);
8576 ctl_clr_prkey(lun, residx);
8577 lun->pr_key_count--;
8579 if (residx == lun->pr_res_idx) {
8580 lun->flags &= ~CTL_LUN_PR_RESERVED;
8581 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8583 if ((lun->res_type == SPR_TYPE_WR_EX_RO
8584 || lun->res_type == SPR_TYPE_EX_AC_RO)
8585 && lun->pr_key_count) {
8587 * If the reservation is a registrants-only
8588 * type, we need to generate a UA for the
8589 * other registered inits. The sense code
8590 * should be RESERVATIONS RELEASED.
8594 for (i = 0; i < CTL_MAX_INITIATORS;i++){
8595 if (ctl_get_prkey(lun, i +
8596 softc->persis_offset) == 0)
8599 CTL_UA_RES_RELEASE);
8603 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8604 if (lun->pr_key_count == 0) {
8605 lun->flags &= ~CTL_LUN_PR_RESERVED;
8607 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8610 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8611 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8612 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
8613 persis_io.pr.pr_info.residx = residx;
8614 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8615 &persis_io, sizeof(persis_io), 0 )) >
8616 CTL_HA_STATUS_SUCCESS) {
8617 printf("CTL:Persis Out error returned from "
8618 "ctl_ha_msg_send %d\n", isc_retval);
8620 } else /* sa_res_key != 0 */ {
8623 * If we aren't registered currently then increment
8624 * the key count and set the registered flag.
8626 ctl_alloc_prkey(lun, residx);
8627 if (ctl_get_prkey(lun, residx) == 0)
8628 lun->pr_key_count++;
8629 ctl_set_prkey(lun, residx, sa_res_key);
8631 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8632 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8633 persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
8634 persis_io.pr.pr_info.residx = residx;
8635 memcpy(persis_io.pr.pr_info.sa_res_key,
8636 param->serv_act_res_key,
8637 sizeof(param->serv_act_res_key));
8638 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8639 &persis_io, sizeof(persis_io), 0)) >
8640 CTL_HA_STATUS_SUCCESS) {
8641 printf("CTL:Persis Out error returned from "
8642 "ctl_ha_msg_send %d\n", isc_retval);
8645 lun->PRGeneration++;
8646 mtx_unlock(&lun->lun_lock);
8652 printf("Reserve executed type %d\n", type);
8654 mtx_lock(&lun->lun_lock);
8655 if (lun->flags & CTL_LUN_PR_RESERVED) {
8657 * If this isn't the reservation holder and it's
8658 * not an "all registrants" type, or if the type is
8659 * different, then we have a conflict.
8661 if ((lun->pr_res_idx != residx
8662 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
8663 || lun->res_type != type) {
8664 mtx_unlock(&lun->lun_lock);
8665 free(ctsio->kern_data_ptr, M_CTL);
8666 ctl_set_reservation_conflict(ctsio);
8667 ctl_done((union ctl_io *)ctsio);
8668 return (CTL_RETVAL_COMPLETE);
8670 mtx_unlock(&lun->lun_lock);
8671 } else /* create a reservation */ {
8673 * If it's not an "all registrants" type record
8674 * reservation holder
8676 if (type != SPR_TYPE_WR_EX_AR
8677 && type != SPR_TYPE_EX_AC_AR)
8678 lun->pr_res_idx = residx; /* Res holder */
8680 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;
8682 lun->flags |= CTL_LUN_PR_RESERVED;
8683 lun->res_type = type;
8685 mtx_unlock(&lun->lun_lock);
8687 /* send msg to other side */
8688 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8689 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8690 persis_io.pr.pr_info.action = CTL_PR_RESERVE;
8691 persis_io.pr.pr_info.residx = lun->pr_res_idx;
8692 persis_io.pr.pr_info.res_type = type;
8693 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
8694 &persis_io, sizeof(persis_io), 0)) >
8695 CTL_HA_STATUS_SUCCESS) {
8696 printf("CTL:Persis Out error returned from "
8697 "ctl_ha_msg_send %d\n", isc_retval);
8703 mtx_lock(&lun->lun_lock);
8704 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
8705 /* No reservation exists; return good status. */
8706 mtx_unlock(&lun->lun_lock);
8710 * Is this nexus a reservation holder?
8712 if (lun->pr_res_idx != residx
8713 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
8715 * not a res holder; return good status but don't generate a UA.
8718 mtx_unlock(&lun->lun_lock);
8722 if (lun->res_type != type) {
8723 mtx_unlock(&lun->lun_lock);
8724 free(ctsio->kern_data_ptr, M_CTL);
8725 ctl_set_illegal_pr_release(ctsio);
8726 ctl_done((union ctl_io *)ctsio);
8727 return (CTL_RETVAL_COMPLETE);
8730 /* okay to release */
8731 lun->flags &= ~CTL_LUN_PR_RESERVED;
8732 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8736 * if this isn't an exclusive access
8737 * res generate UA for all other
8740 if (type != SPR_TYPE_EX_AC
8741 && type != SPR_TYPE_WR_EX) {
8742 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8745 i + softc->persis_offset) == 0)
8747 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8750 mtx_unlock(&lun->lun_lock);
8751 /* Send msg to other side */
8752 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8753 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8754 persis_io.pr.pr_info.action = CTL_PR_RELEASE;
8755 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io,
8756 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8757 printf("CTL:Persis Out error returned from "
8758 "ctl_ha_msg_send %d\n", isc_retval);
8763 /* send msg to other side */
8765 mtx_lock(&lun->lun_lock);
8766 lun->flags &= ~CTL_LUN_PR_RESERVED;
8768 lun->pr_key_count = 0;
8769 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8771 ctl_clr_prkey(lun, residx);
8772 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++)
8773 if (ctl_get_prkey(lun, i) != 0) {
8774 ctl_clr_prkey(lun, i);
8775 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
8777 lun->PRGeneration++;
8778 mtx_unlock(&lun->lun_lock);
8779 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8780 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8781 persis_io.pr.pr_info.action = CTL_PR_CLEAR;
8782 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8783 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8784 printf("CTL:Persis Out error returned from "
8785 "ctl_ha_msg_send %d\n", isc_retval);
8790 case SPRO_PRE_ABO: {
8793 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
8794 residx, ctsio, cdb, param);
8796 return (CTL_RETVAL_COMPLETE);
8800 panic("Invalid PR action %x", cdb->action);
8804 free(ctsio->kern_data_ptr, M_CTL);
8805 ctl_set_success(ctsio);
8806 ctl_done((union ctl_io *)ctsio);
8812 * This routine is for handling a message from the other SC pertaining to
8813 * persistent reserve out. All the error checking will have been done,
8814 * so only performing the action need be done here to keep the two SCs in sync.
8818 ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
8820 struct ctl_lun *lun;
8821 struct ctl_softc *softc;
8825 softc = control_softc;
8827 targ_lun = msg->hdr.nexus.targ_mapped_lun;
8828 lun = softc->ctl_luns[targ_lun];
8829 mtx_lock(&lun->lun_lock);
8830 switch(msg->pr.pr_info.action) {
8831 case CTL_PR_REG_KEY:
8832 ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
8833 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0)
8834 lun->pr_key_count++;
8835 ctl_set_prkey(lun, msg->pr.pr_info.residx,
8836 scsi_8btou64(msg->pr.pr_info.sa_res_key));
8837 lun->PRGeneration++;
8840 case CTL_PR_UNREG_KEY:
8841 ctl_clr_prkey(lun, msg->pr.pr_info.residx);
8842 lun->pr_key_count--;
8844 /* XXX Need to see if the reservation has been released */
8845 /* if so do we need to generate UA? */
8846 if (msg->pr.pr_info.residx == lun->pr_res_idx) {
8847 lun->flags &= ~CTL_LUN_PR_RESERVED;
8848 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8850 if ((lun->res_type == SPR_TYPE_WR_EX_RO
8851 || lun->res_type == SPR_TYPE_EX_AC_RO)
8852 && lun->pr_key_count) {
8854 * If the reservation is a registrants-only
8855 * type, we need to generate a UA for the
8856 * other registered inits. The sense code
8857 * should be RESERVATIONS RELEASED.
8861 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8862 if (ctl_get_prkey(lun, i +
8863 softc->persis_offset) == 0)
8866 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8870 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
8871 if (lun->pr_key_count == 0) {
8872 lun->flags &= ~CTL_LUN_PR_RESERVED;
8874 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8877 lun->PRGeneration++;
8880 case CTL_PR_RESERVE:
8881 lun->flags |= CTL_LUN_PR_RESERVED;
8882 lun->res_type = msg->pr.pr_info.res_type;
8883 lun->pr_res_idx = msg->pr.pr_info.residx;
8887 case CTL_PR_RELEASE:
8889 * if this isn't an exclusive access res generate UA for all
8890 * other registrants.
8892 if (lun->res_type != SPR_TYPE_EX_AC
8893 && lun->res_type != SPR_TYPE_WR_EX) {
8894 for (i = 0; i < CTL_MAX_INITIATORS; i++)
8895 if (ctl_get_prkey(lun, i + softc->persis_offset) != 0)
8896 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
8899 lun->flags &= ~CTL_LUN_PR_RESERVED;
8900 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8904 case CTL_PR_PREEMPT:
8905 ctl_pro_preempt_other(lun, msg);
8908 lun->flags &= ~CTL_LUN_PR_RESERVED;
8910 lun->pr_key_count = 0;
8911 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8913 for (i = 0; i < 2 * CTL_MAX_INITIATORS; i++) {
8914 if (ctl_get_prkey(lun, i) == 0)
8916 ctl_clr_prkey(lun, i);
8917 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
8919 lun->PRGeneration++;
8923 mtx_unlock(&lun->lun_lock);
8927 ctl_read_write(struct ctl_scsiio *ctsio)
8929 struct ctl_lun *lun;
8930 struct ctl_lba_len_flags *lbalen;
8932 uint32_t num_blocks;
8936 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8938 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
8941 retval = CTL_RETVAL_COMPLETE;
8943 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10
8944 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
8945 switch (ctsio->cdb[0]) {
8948 struct scsi_rw_6 *cdb;
8950 cdb = (struct scsi_rw_6 *)ctsio->cdb;
8952 lba = scsi_3btoul(cdb->addr);
8953 /* only 5 bits are valid in the most significant address byte */
8955 num_blocks = cdb->length;
8957 * Per SBC-2, a transfer length of zero here means 256 blocks.
8959 if (num_blocks == 0)
8965 struct scsi_rw_10 *cdb;
8967 cdb = (struct scsi_rw_10 *)ctsio->cdb;
8968 if (cdb->byte2 & SRW10_FUA)
8969 flags |= CTL_LLF_FUA;
8970 if (cdb->byte2 & SRW10_DPO)
8971 flags |= CTL_LLF_DPO;
8972 lba = scsi_4btoul(cdb->addr);
8973 num_blocks = scsi_2btoul(cdb->length);
8976 case WRITE_VERIFY_10: {
8977 struct scsi_write_verify_10 *cdb;
8979 cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
8980 flags |= CTL_LLF_FUA;
8981 if (cdb->byte2 & SWV_DPO)
8982 flags |= CTL_LLF_DPO;
8983 lba = scsi_4btoul(cdb->addr);
8984 num_blocks = scsi_2btoul(cdb->length);
8989 struct scsi_rw_12 *cdb;
8991 cdb = (struct scsi_rw_12 *)ctsio->cdb;
8992 if (cdb->byte2 & SRW12_FUA)
8993 flags |= CTL_LLF_FUA;
8994 if (cdb->byte2 & SRW12_DPO)
8995 flags |= CTL_LLF_DPO;
8996 lba = scsi_4btoul(cdb->addr);
8997 num_blocks = scsi_4btoul(cdb->length);
9000 case WRITE_VERIFY_12: {
9001 struct scsi_write_verify_12 *cdb;
9003 cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
9004 flags |= CTL_LLF_FUA;
9005 if (cdb->byte2 & SWV_DPO)
9006 flags |= CTL_LLF_DPO;
9007 lba = scsi_4btoul(cdb->addr);
9008 num_blocks = scsi_4btoul(cdb->length);
9013 struct scsi_rw_16 *cdb;
9015 cdb = (struct scsi_rw_16 *)ctsio->cdb;
9016 if (cdb->byte2 & SRW12_FUA)
9017 flags |= CTL_LLF_FUA;
9018 if (cdb->byte2 & SRW12_DPO)
9019 flags |= CTL_LLF_DPO;
9020 lba = scsi_8btou64(cdb->addr);
9021 num_blocks = scsi_4btoul(cdb->length);
9024 case WRITE_ATOMIC_16: {
9025 struct scsi_rw_16 *cdb;
9027 if (lun->be_lun->atomicblock == 0) {
9028 ctl_set_invalid_opcode(ctsio);
9029 ctl_done((union ctl_io *)ctsio);
9030 return (CTL_RETVAL_COMPLETE);
9033 cdb = (struct scsi_rw_16 *)ctsio->cdb;
9034 if (cdb->byte2 & SRW12_FUA)
9035 flags |= CTL_LLF_FUA;
9036 if (cdb->byte2 & SRW12_DPO)
9037 flags |= CTL_LLF_DPO;
9038 lba = scsi_8btou64(cdb->addr);
9039 num_blocks = scsi_4btoul(cdb->length);
9040 if (num_blocks > lun->be_lun->atomicblock) {
9041 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
9042 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
9044 ctl_done((union ctl_io *)ctsio);
9045 return (CTL_RETVAL_COMPLETE);
9049 case WRITE_VERIFY_16: {
9050 struct scsi_write_verify_16 *cdb;
9052 cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
9053 flags |= CTL_LLF_FUA;
9054 if (cdb->byte2 & SWV_DPO)
9055 flags |= CTL_LLF_DPO;
9056 lba = scsi_8btou64(cdb->addr);
9057 num_blocks = scsi_4btoul(cdb->length);
9062 * We got a command we don't support. This shouldn't
9063 * happen; commands should be filtered out above us.
9065 ctl_set_invalid_opcode(ctsio);
9066 ctl_done((union ctl_io *)ctsio);
9068 return (CTL_RETVAL_COMPLETE);
9069 break; /* NOTREACHED */
9073 * The first check is to make sure we're in bounds; the second
9074 * check is to catch wrap-around problems. If the lba + num blocks
9075 * is less than the lba, then we've wrapped around and the block
9076 * range is invalid anyway.
9078 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
9079 || ((lba + num_blocks) < lba)) {
9080 ctl_set_lba_out_of_range(ctsio);
9081 ctl_done((union ctl_io *)ctsio);
9082 return (CTL_RETVAL_COMPLETE);
9086 * According to SBC-3, a transfer length of 0 is not an error.
9087 * Note that this cannot happen with WRITE(6) or READ(6), since 0
9088 * translates to 256 blocks for those commands.
9090 if (num_blocks == 0) {
9091 ctl_set_success(ctsio);
9092 ctl_done((union ctl_io *)ctsio);
9093 return (CTL_RETVAL_COMPLETE);
9096 /* Set FUA and/or DPO if caches are disabled. */
9098 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
9100 flags |= CTL_LLF_FUA | CTL_LLF_DPO;
9102 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
9104 flags |= CTL_LLF_FUA;
9107 lbalen = (struct ctl_lba_len_flags *)
9108 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9110 lbalen->len = num_blocks;
9111 lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;
9113 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
9114 ctsio->kern_rel_offset = 0;
9116 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));
9118 retval = lun->backend->data_submit((union ctl_io *)ctsio);
9124 ctl_cnw_cont(union ctl_io *io)
9126 struct ctl_scsiio *ctsio;
9127 struct ctl_lun *lun;
9128 struct ctl_lba_len_flags *lbalen;
9131 ctsio = &io->scsiio;
9132 ctsio->io_hdr.status = CTL_STATUS_NONE;
9133 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
9134 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9135 lbalen = (struct ctl_lba_len_flags *)
9136 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9137 lbalen->flags &= ~CTL_LLF_COMPARE;
9138 lbalen->flags |= CTL_LLF_WRITE;
9140 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
9141 retval = lun->backend->data_submit((union ctl_io *)ctsio);
9146 ctl_cnw(struct ctl_scsiio *ctsio)
9148 struct ctl_lun *lun;
9149 struct ctl_lba_len_flags *lbalen;
9151 uint32_t num_blocks;
9154 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9156 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
9159 retval = CTL_RETVAL_COMPLETE;
9161 switch (ctsio->cdb[0]) {
9162 case COMPARE_AND_WRITE: {
9163 struct scsi_compare_and_write *cdb;
9165 cdb = (struct scsi_compare_and_write *)ctsio->cdb;
9166 if (cdb->byte2 & SRW10_FUA)
9167 flags |= CTL_LLF_FUA;
9168 if (cdb->byte2 & SRW10_DPO)
9169 flags |= CTL_LLF_DPO;
9170 lba = scsi_8btou64(cdb->addr);
9171 num_blocks = cdb->length;
9176 * We got a command we don't support. This shouldn't
9177 * happen; commands should be filtered out above us.
9179 ctl_set_invalid_opcode(ctsio);
9180 ctl_done((union ctl_io *)ctsio);
9182 return (CTL_RETVAL_COMPLETE);
9183 break; /* NOTREACHED */
9187 * The first check is to make sure we're in bounds; the second
9188 * check is to catch wrap-around problems. If the lba + num blocks
9189 * is less than the lba, then we've wrapped around and the block
9190 * range is invalid anyway.
9192 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
9193 || ((lba + num_blocks) < lba)) {
9194 ctl_set_lba_out_of_range(ctsio);
9195 ctl_done((union ctl_io *)ctsio);
9196 return (CTL_RETVAL_COMPLETE);
9200 * According to SBC-3, a transfer length of 0 is not an error.
9202 if (num_blocks == 0) {
9203 ctl_set_success(ctsio);
9204 ctl_done((union ctl_io *)ctsio);
9205 return (CTL_RETVAL_COMPLETE);
9208 /* Set FUA if write cache is disabled. */
9209 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
9211 flags |= CTL_LLF_FUA;
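9212 /* COMPARE AND WRITE transfers the compare data and the write data back to back, hence twice num_blocks. */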
9213 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
9214 ctsio->kern_rel_offset = 0;
9217 * Set the IO_CONT flag, so that if this I/O gets passed to
9218 * ctl_data_submit_done(), it'll get passed back to
9219 * ctl_cnw_cont() for further processing.
9221 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
9222 ctsio->io_cont = ctl_cnw_cont;
9224 lbalen = (struct ctl_lba_len_flags *)
9225 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9227 lbalen->len = num_blocks;
9228 lbalen->flags = CTL_LLF_COMPARE | flags;
9230 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
9231 retval = lun->backend->data_submit((union ctl_io *)ctsio);
9236 ctl_verify(struct ctl_scsiio *ctsio)
9238 struct ctl_lun *lun;
9239 struct ctl_lba_len_flags *lbalen;
9241 uint32_t num_blocks;
9245 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
9247 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
9250 flags = CTL_LLF_FUA;
9251 retval = CTL_RETVAL_COMPLETE;
9253 switch (ctsio->cdb[0]) {
9255 struct scsi_verify_10 *cdb;
9257 cdb = (struct scsi_verify_10 *)ctsio->cdb;
9258 if (cdb->byte2 & SVFY_BYTCHK)
9260 if (cdb->byte2 & SVFY_DPO)
9261 flags |= CTL_LLF_DPO;
9262 lba = scsi_4btoul(cdb->addr);
9263 num_blocks = scsi_2btoul(cdb->length);
9267 struct scsi_verify_12 *cdb;
9269 cdb = (struct scsi_verify_12 *)ctsio->cdb;
9270 if (cdb->byte2 & SVFY_BYTCHK)
9272 if (cdb->byte2 & SVFY_DPO)
9273 flags |= CTL_LLF_DPO;
9274 lba = scsi_4btoul(cdb->addr);
9275 num_blocks = scsi_4btoul(cdb->length);
9279 struct scsi_rw_16 *cdb;
9281 cdb = (struct scsi_rw_16 *)ctsio->cdb;
9282 if (cdb->byte2 & SVFY_BYTCHK)
9284 if (cdb->byte2 & SVFY_DPO)
9285 flags |= CTL_LLF_DPO;
9286 lba = scsi_8btou64(cdb->addr);
9287 num_blocks = scsi_4btoul(cdb->length);
9292 * We got a command we don't support. This shouldn't
9293 * happen, commands should be filtered out above us.
9295 ctl_set_invalid_opcode(ctsio);
9296 ctl_done((union ctl_io *)ctsio);
9297 return (CTL_RETVAL_COMPLETE);
9301 * The first check is to make sure we're in bounds, the second
9302 * check is to catch wrap-around problems. If the lba + num blocks
9303 * is less than the lba, then we've wrapped around and the block
9304 * range is invalid anyway.
9306 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
9307 || ((lba + num_blocks) < lba)) {
9308 ctl_set_lba_out_of_range(ctsio);
9309 ctl_done((union ctl_io *)ctsio);
9310 return (CTL_RETVAL_COMPLETE);
9314 * According to SBC-3, a transfer length of 0 is not an error.
9316 if (num_blocks == 0) {
9317 ctl_set_success(ctsio);
9318 ctl_done((union ctl_io *)ctsio);
9319 return (CTL_RETVAL_COMPLETE);
9322 lbalen = (struct ctl_lba_len_flags *)
9323 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
9325 lbalen->len = num_blocks;
9327 lbalen->flags = CTL_LLF_COMPARE | flags;
9328 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
9330 lbalen->flags = CTL_LLF_VERIFY | flags;
9331 ctsio->kern_total_len = 0;
9333 ctsio->kern_rel_offset = 0;
9335 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
9336 retval = lun->backend->data_submit((union ctl_io *)ctsio);
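/*
 * Note on the two VERIFY modes handled above: with BYTCHK set, the
 * initiator supplies num_blocks of data and the backend compares it
 * against the medium (CTL_LLF_COMPARE, full-size data phase).  With
 * BYTCHK clear it is a medium verification only, so no data is
 * transferred and kern_total_len is 0 (CTL_LLF_VERIFY).
 */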
int
ctl_report_luns(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = control_softc;
	struct scsi_report_luns *cdb;
	struct scsi_report_luns_data *lun_data;
	struct ctl_lun *lun, *request_lun;
	struct ctl_port *port;
	int num_luns, retval;
	uint32_t alloc_len, lun_datalen;
	int num_filled, well_known;
	uint32_t initidx, targ_lun_id, lun_id;

	retval = CTL_RETVAL_COMPLETE;
	well_known = 0;

	cdb = (struct scsi_report_luns *)ctsio->cdb;
	port = ctl_io_port(&ctsio->io_hdr);

	CTL_DEBUG_PRINT(("ctl_report_luns\n"));

	mtx_lock(&softc->ctl_lock);
	num_luns = 0;
	for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) {
		if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS)
			num_luns++;
	}
	mtx_unlock(&softc->ctl_lock);

	switch (cdb->select_report) {
	case RPL_REPORT_DEFAULT:
	case RPL_REPORT_ALL:
		break;
	case RPL_REPORT_WELLKNOWN:
		well_known = 1;
		num_luns = 0;
		break;
	default:
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
		break; /* NOTREACHED */
	}

	alloc_len = scsi_4btoul(cdb->length);
	/*
	 * The initiator has to allocate at least 16 bytes for this request,
	 * so he can at least get the header and the first LUN.  Otherwise
	 * we reject the request (per SPC-3 rev 14, section 6.21).
	 */
	if (alloc_len < (sizeof(struct scsi_report_luns_data) +
	    sizeof(struct scsi_report_luns_lundata))) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	request_lun = (struct ctl_lun *)
		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	lun_datalen = sizeof(*lun_data) +
		(num_luns * sizeof(struct scsi_report_luns_lundata));

	ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
	lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	for (targ_lun_id = 0, num_filled = 0;
	     targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns;
	     targ_lun_id++) {
		lun_id = ctl_lun_map_from_port(port, targ_lun_id);
		if (lun_id >= CTL_MAX_LUNS)
			continue;
		lun = softc->ctl_luns[lun_id];
		if (lun == NULL)
			continue;

		if (targ_lun_id <= 0xff) {
			/*
			 * Peripheral addressing method, bus number 0.
			 */
			lun_data->luns[num_filled].lundata[0] =
				RPL_LUNDATA_ATYP_PERIPH;
			lun_data->luns[num_filled].lundata[1] = targ_lun_id;
			num_filled++;
		} else if (targ_lun_id <= 0x3fff) {
			/*
			 * Flat addressing method.
			 */
			lun_data->luns[num_filled].lundata[0] =
				RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8);
			lun_data->luns[num_filled].lundata[1] =
				(targ_lun_id & 0xff);
			num_filled++;
		} else if (targ_lun_id <= 0xffffff) {
			/*
			 * Extended flat addressing method.
			 */
			lun_data->luns[num_filled].lundata[0] =
			    RPL_LUNDATA_ATYP_EXTLUN | 0x12;
			scsi_ulto3b(targ_lun_id,
			    &lun_data->luns[num_filled].lundata[1]);
			num_filled++;
		} else {
			printf("ctl_report_luns: bogus LUN number %jd, "
			       "skipping\n", (intmax_t)targ_lun_id);
		}
		/*
		 * According to SPC-3, rev 14 section 6.21:
		 *
		 * "The execution of a REPORT LUNS command to any valid and
		 * installed logical unit shall clear the REPORTED LUNS DATA
		 * HAS CHANGED unit attention condition for all logical
		 * units of that target with respect to the requesting
		 * initiator. A valid and installed logical unit is one
		 * having a PERIPHERAL QUALIFIER of 000b in the standard
		 * INQUIRY data (see 6.4.2)."
		 *
		 * If request_lun is NULL, the LUN this report luns command
		 * was issued to is either disabled or doesn't exist.  In
		 * that case, we shouldn't clear any pending lun change unit
		 * attention.
		 */
		if (request_lun != NULL) {
			mtx_lock(&lun->lun_lock);
			ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE);
			mtx_unlock(&lun->lun_lock);
		}
	}
	mtx_unlock(&softc->ctl_lock);

	/*
	 * It's quite possible that we've returned fewer LUNs than we
	 * allocated space for.  Trim it.
	 */
	lun_datalen = sizeof(*lun_data) +
		(num_filled * sizeof(struct scsi_report_luns_lundata));

	if (lun_datalen < alloc_len) {
		ctsio->residual = alloc_len - lun_datalen;
		ctsio->kern_data_len = lun_datalen;
		ctsio->kern_total_len = lun_datalen;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * We set this to the actual data length, regardless of how much
	 * space we actually have to return results.  If the user looks at
	 * this value, he'll know whether or not he allocated enough space
	 * and can reissue the command if necessary.  We don't support well
	 * known logical units, so if the user asks for that, return none.
	 */
	scsi_ulto4b(lun_datalen - 8, lun_data->length);

	/*
	 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
	 * this request.
	 */
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}
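/*
 * Worked example of the LUN address formats generated above (SAM-3):
 * target LUN 5 uses the peripheral method and is encoded as
 * 0x00 0x05 00 00 00 00 00 00, while target LUN 0x1234 uses the flat
 * method and becomes 0x52 0x34 00 00 00 00 00 00 -- byte 0 is
 * RPL_LUNDATA_ATYP_FLAT (0x40) OR'ed with (0x1234 >> 8), and byte 1
 * is the low-order 0x34.
 */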
int
ctl_request_sense(struct ctl_scsiio *ctsio)
{
	struct scsi_request_sense *cdb;
	struct scsi_sense_data *sense_ptr;
	struct ctl_softc *ctl_softc;
	struct ctl_lun *lun;
	uint32_t initidx;
	int have_error;
	scsi_sense_data_type sense_format;
	ctl_ua_type ua_type;

	cdb = (struct scsi_request_sense *)ctsio->cdb;

	ctl_softc = control_softc;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_request_sense\n"));

	/*
	 * Determine which sense format the user wants.
	 */
	if (cdb->byte2 & SRS_DESC)
		sense_format = SSD_TYPE_DESC;
	else
		sense_format = SSD_TYPE_FIXED;

	ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
	sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/*
	 * struct scsi_sense_data, which is currently set to 256 bytes, is
	 * larger than the largest allowed value for the length field in the
	 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
	 */
	ctsio->residual = 0;
	ctsio->kern_data_len = cdb->length;
	ctsio->kern_total_len = cdb->length;

	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If we don't have a LUN, we don't have any pending sense.
	 */
	if (lun == NULL)
		goto no_sense;

	have_error = 0;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	/*
	 * Check for pending sense, and then for pending unit attentions.
	 * Pending sense gets returned first, then pending unit attentions.
	 */
	mtx_lock(&lun->lun_lock);
	if (ctl_is_set(lun->have_ca, initidx)) {
		scsi_sense_data_type stored_format;

		/*
		 * Check to see which sense format was used for the stored
		 * sense data.
		 */
		stored_format = scsi_sense_type(&lun->pending_sense[initidx]);

		/*
		 * If the user requested a different sense format than the
		 * one we stored, then we need to convert it to the other
		 * format.  If we're going from descriptor to fixed format
		 * sense data, we may lose things in translation, depending
		 * on what options were used.
		 *
		 * If the stored format is SSD_TYPE_NONE (i.e. invalid) for
		 * some reason, we'll just copy it out as-is.
		 */
		if ((stored_format == SSD_TYPE_FIXED)
		 && (sense_format == SSD_TYPE_DESC))
			ctl_sense_to_desc((struct scsi_sense_data_fixed *)
			    &lun->pending_sense[initidx],
			    (struct scsi_sense_data_desc *)sense_ptr);
		else if ((stored_format == SSD_TYPE_DESC)
		      && (sense_format == SSD_TYPE_FIXED))
			ctl_sense_to_fixed((struct scsi_sense_data_desc *)
			    &lun->pending_sense[initidx],
			    (struct scsi_sense_data_fixed *)sense_ptr);
		else
			memcpy(sense_ptr, &lun->pending_sense[initidx],
			       MIN(sizeof(*sense_ptr),
			       sizeof(lun->pending_sense[initidx])));

		ctl_clear_mask(lun->have_ca, initidx);
		have_error = 1;
	} else {
		ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format);
		if (ua_type != CTL_UA_NONE)
			have_error = 1;
		if (ua_type == CTL_UA_LUN_CHANGE) {
			mtx_unlock(&lun->lun_lock);
			mtx_lock(&ctl_softc->ctl_lock);
			ctl_clear_ua(ctl_softc, initidx, ua_type);
			mtx_unlock(&ctl_softc->ctl_lock);
			mtx_lock(&lun->lun_lock);
		}
	}
	mtx_unlock(&lun->lun_lock);

	/*
	 * We already have a pending error, return it.
	 */
	if (have_error != 0) {
		/*
		 * We report the SCSI status as OK, since the status of the
		 * request sense command itself is OK.
		 * We report 0 for the sense length, because we aren't doing
		 * autosense in this case.  We're reporting sense as
		 * parameter data.
		 */
		ctl_set_success(ctsio);
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

no_sense:

	/*
	 * No sense information to report, so we report that everything is
	 * okay.
	 */
	ctl_set_sense_data(sense_ptr,
			   lun,
			   sense_format,
			   /*current_error*/ 1,
			   /*sense_key*/ SSD_KEY_NO_SENSE,
			   /*asc*/ 0x00,
			   /*ascq*/ 0x00,
			   SSD_ELEM_NONE);

	/*
	 * We report 0 for the sense length, because we aren't doing
	 * autosense in this case.  We're reporting sense as parameter data.
	 */
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
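/*
 * Background for the conversion above: fixed sense data (SSD_TYPE_FIXED,
 * response codes 0x70/0x71) and descriptor sense data (SSD_TYPE_DESC,
 * response codes 0x72/0x73) carry the same key/ASC/ASCQ information in
 * different layouts, so translating descriptor sense into the smaller
 * fixed format can drop descriptor-only fields.
 */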
int
ctl_tur(struct ctl_scsiio *ctsio)
{
	CTL_DEBUG_PRINT(("ctl_tur\n"));

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (CTL_RETVAL_COMPLETE);
}
static int
ctl_cmddt_inquiry(struct ctl_scsiio *ctsio);
/*
 * SCSI VPD page 0x00, the Supported VPD Pages page.
 */
static int
ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_supported_pages *pages;
	int sup_page_size;
	struct ctl_lun *lun;
	int p;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
	    SCSI_EVPD_NUM_SUPPORTED_PAGES;
	ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
	pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (sup_page_size < alloc_len) {
		ctsio->residual = alloc_len - sup_page_size;
		ctsio->kern_data_len = sup_page_size;
		ctsio->kern_total_len = sup_page_size;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		pages->device = (SID_QUAL_LU_CONNECTED << 5) |
				lun->be_lun->lun_type;
	else
		pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	p = 0;
	/* Supported VPD pages */
	pages->page_list[p++] = SVPD_SUPPORTED_PAGES;
	/* Serial Number */
	pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER;
	/* Device Identification */
	pages->page_list[p++] = SVPD_DEVICE_ID;
	/* Extended INQUIRY Data */
	pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA;
	/* Mode Page Policy */
	pages->page_list[p++] = SVPD_MODE_PAGE_POLICY;
	/* SCSI Ports */
	pages->page_list[p++] = SVPD_SCSI_PORTS;
	/* Third-party Copy */
	pages->page_list[p++] = SVPD_SCSI_TPC;
	if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
		/* Block limits */
		pages->page_list[p++] = SVPD_BLOCK_LIMITS;
		/* Block Device Characteristics */
		pages->page_list[p++] = SVPD_BDC;
		/* Logical Block Provisioning */
		pages->page_list[p++] = SVPD_LBP;
	}
	pages->length = p;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
/*
 * SCSI VPD page 0x80, the Unit Serial Number page.
 */
static int
ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_unit_serial_number *sn_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = 4 + CTL_SN_LEN;
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				  lun->be_lun->lun_type;
	else
		sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER;
	sn_ptr->length = CTL_SN_LEN;
	/*
	 * If we don't have a LUN, we just leave the serial number as
	 * all spaces.
	 */
	if (lun != NULL) {
		strncpy((char *)sn_ptr->serial_num,
			(char *)lun->be_lun->serial_num, CTL_SN_LEN);
	} else
		memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
/*
 * SCSI VPD page 0x86, the Extended INQUIRY Data page.
 */
static int
ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_extended_inquiry_data *eid_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				   lun->be_lun->lun_type;
	else
		eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA;
	scsi_ulto2b(data_len - 4, eid_ptr->page_length);
	/*
	 * We support head of queue, ordered and simple tags.
	 */
	eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP;
	/*
	 * Volatile cache supported.
	 */
	eid_ptr->flags3 = SVPD_EID_V_SUP;

	/*
	 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit
	 * attention for a particular IT nexus on all LUNs once we report
	 * it to that nexus once.  This bit is required as of SPC-4.
	 */
	eid_ptr->flags4 = SVPD_EID_LUICLT;

	/*
	 * XXX KDM in order to correctly answer this, we would need
	 * information from the SIM to determine how much sense data it
	 * can send.  So this would really be a path inquiry field, most
	 * likely.  This can be set to a maximum of 252 according to SPC-4,
	 * but the hardware may or may not be able to support that much.
	 * 0 just means that the maximum sense data length is not reported.
	 */
	eid_ptr->max_sense_length = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
static int
ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_mode_page_policy *mpp_ptr;
	struct ctl_lun *lun;
	int data_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_mode_page_policy) +
	    sizeof(struct scsi_vpd_mode_page_policy_descr);

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				   lun->be_lun->lun_type;
	else
		mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY;
	scsi_ulto2b(data_len - 4, mpp_ptr->page_length);
	mpp_ptr->descr[0].page_code = 0x3f;
	mpp_ptr->descr[0].subpage_code = 0xff;
	mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
/*
 * SCSI VPD page 0x83, the Device Identification page.
 */
static int
ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_device_id *devid_ptr;
	struct scsi_vpd_id_descriptor *desc;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct ctl_port *port;
	int data_len;
	uint8_t proto;

	softc = control_softc;

	port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)];
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	data_len = sizeof(struct scsi_vpd_device_id) +
	    sizeof(struct scsi_vpd_id_descriptor) +
	    sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
	    sizeof(struct scsi_vpd_id_descriptor) +
	    sizeof(struct scsi_vpd_id_trgt_port_grp_id);
	if (lun && lun->lun_devid)
		data_len += lun->lun_devid->len;
	if (port->port_devid)
		data_len += port->port_devid->len;
	if (port->target_devid)
		data_len += port->target_devid->len;

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	devid_ptr->page_code = SVPD_DEVICE_ID;
	scsi_ulto2b(data_len - 4, devid_ptr->length);

	if (port->port_type == CTL_PORT_FC)
		proto = SCSI_PROTO_FC << 4;
	else if (port->port_type == CTL_PORT_ISCSI)
		proto = SCSI_PROTO_ISCSI << 4;
	else
		proto = SCSI_PROTO_SPI << 4;
	desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;

	/*
	 * We're using a LUN association here.  i.e., this device ID is a
	 * per-LUN identifier.
	 */
	if (lun && lun->lun_devid) {
		memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
		desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
		    lun->lun_devid->len);
	}

	/*
	 * This is for the WWPN which is a port association.
	 */
	if (port->port_devid) {
		memcpy(desc, port->port_devid->data, port->port_devid->len);
		desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
		    port->port_devid->len);
	}

	/*
	 * This is for the Relative Target Port (type 4h) identifier.
	 */
	desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
	    SVPD_ID_TYPE_RELTARG;
	desc->length = 4;
	scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
	desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
	    sizeof(struct scsi_vpd_id_rel_trgt_port_id));

	/*
	 * This is for the Target Port Group (type 5h) identifier.
	 */
	desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
	    SVPD_ID_TYPE_TPORTGRP;
	desc->length = 4;
	scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1,
	    &desc->identifier[2]);
	desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
	    sizeof(struct scsi_vpd_id_trgt_port_grp_id));

	/*
	 * This is for the Target identifier.
	 */
	if (port->target_devid) {
		memcpy(desc, port->target_devid->data,
		    port->target_devid->len);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
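/*
 * Example of one of the binary descriptors built above: for the Relative
 * Target Port identifier on target port 1, the eight bytes are
 * (proto << 4) | SVPD_ID_CODESET_BINARY, then 0x94 (SVPD_ID_PIV |
 * SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_RELTARG), a reserved byte, a length
 * of 0x04, and a 4-byte identifier whose last two bytes hold the port
 * number: 00 00 00 01.
 */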
static int
ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_softc *softc = control_softc;
	struct scsi_vpd_scsi_ports *sp;
	struct scsi_vpd_port_designation *pd;
	struct scsi_vpd_port_designation_cont *pdc;
	struct ctl_lun *lun;
	struct ctl_port *port;
	int data_len, num_target_ports, iid_len, id_len, g, pg, p;
	int num_target_port_groups;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	if (softc->is_single)
		num_target_port_groups = 1;
	else
		num_target_port_groups = NUM_TARGET_PORT_GROUPS;
	num_target_ports = 0;
	iid_len = 0;
	id_len = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
			continue;
		if (lun != NULL &&
		    ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
			continue;
		num_target_ports++;
		if (port->init_devid)
			iid_len += port->init_devid->len;
		if (port->port_devid)
			id_len += port->port_devid->len;
	}
	mtx_unlock(&softc->ctl_lock);

	data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups *
	    num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
	    sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		sp->device = (SID_QUAL_LU_CONNECTED << 5) |
			      lun->be_lun->lun_type;
	else
		sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	sp->page_code = SVPD_SCSI_PORTS;
	scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
	    sp->page_length);
	pd = &sp->design[0];

	mtx_lock(&softc->ctl_lock);
	pg = softc->port_offset / CTL_MAX_PORTS;
	for (g = 0; g < num_target_port_groups; g++) {
		STAILQ_FOREACH(port, &softc->port_list, links) {
			if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
				continue;
			if (lun != NULL &&
			    ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
				continue;
			p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
			scsi_ulto2b(p, pd->relative_port_id);
			if (port->init_devid && g == pg) {
				iid_len = port->init_devid->len;
				memcpy(pd->initiator_transportid,
				    port->init_devid->data,
				    port->init_devid->len);
			} else
				iid_len = 0;
			scsi_ulto2b(iid_len, pd->initiator_transportid_length);
			pdc = (struct scsi_vpd_port_designation_cont *)
			    (&pd->initiator_transportid[iid_len]);
			if (port->port_devid && g == pg) {
				id_len = port->port_devid->len;
				memcpy(pdc->target_port_descriptors,
				    port->port_devid->data,
				    port->port_devid->len);
			} else
				id_len = 0;
			scsi_ulto2b(id_len,
			    pdc->target_port_descriptors_length);
			pd = (struct scsi_vpd_port_designation *)
			    ((uint8_t *)pdc->target_port_descriptors + id_len);
		}
	}
	mtx_unlock(&softc->ctl_lock);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
static int
ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_block_limits *bl_ptr;
	struct ctl_lun *lun;
	int bs;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
	bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (sizeof(*bl_ptr) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*bl_ptr);
		ctsio->kern_data_len = sizeof(*bl_ptr);
		ctsio->kern_total_len = sizeof(*bl_ptr);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				  lun->be_lun->lun_type;
	else
		bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	bl_ptr->page_code = SVPD_BLOCK_LIMITS;
	scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length);
	bl_ptr->max_cmp_write_len = 0xff;
	scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
	if (lun != NULL) {
		bs = lun->be_lun->blocksize;
		scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len);
		if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
			scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
			scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt);
			if (lun->be_lun->ublockexp != 0) {
				scsi_ulto4b((1 << lun->be_lun->ublockexp),
				    bl_ptr->opt_unmap_grain);
				scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff,
				    bl_ptr->unmap_grain_align);
			}
		}
		scsi_ulto4b(lun->be_lun->atomicblock,
		    bl_ptr->max_atomic_transfer_length);
		scsi_ulto4b(0, bl_ptr->atomic_alignment);
		scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
	}
	scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
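/*
 * Note on the 0x80000000 OR above: the top bit of the OPTIMAL UNMAP
 * GRANULARITY ALIGNMENT field is UGAVALID (SBC-3), which tells the
 * initiator that the reported alignment is valid rather than merely
 * defaulted to zero.
 */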
static int
ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_block_device_characteristics *bdc_ptr;
	struct ctl_lun *lun;
	const char *value;
	u_int i;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
	bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (sizeof(*bdc_ptr) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*bdc_ptr);
		ctsio->kern_data_len = sizeof(*bdc_ptr);
		ctsio->kern_total_len = sizeof(*bdc_ptr);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				   lun->be_lun->lun_type;
	else
		bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	bdc_ptr->page_code = SVPD_BDC;
	scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
	if (lun != NULL &&
	    (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = CTL_DEFAULT_ROTATION_RATE;
	scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
	if (lun != NULL &&
	    (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = 0;
	bdc_ptr->wab_wac_ff = (i & 0x0f);
	bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
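/*
 * The MEDIUM ROTATION RATE values written above follow SBC-3: 0x0000
 * means not reported, 0x0001 means non-rotating medium (solid state),
 * and values from 0x0401 through 0xFFFE give the nominal rotation rate
 * in revolutions per minute.
 */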
static int
ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_logical_block_prov *lbp_ptr;
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
	lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (sizeof(*lbp_ptr) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*lbp_ptr);
		ctsio->kern_data_len = sizeof(*lbp_ptr);
		ctsio->kern_total_len = sizeof(*lbp_ptr);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				   lun->be_lun->lun_type;
	else
		lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	lbp_ptr->page_code = SVPD_LBP;
	scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
	lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
	if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
		lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
		    SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
		lbp_ptr->prov_type = SVPD_LBP_THIN;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
/*
 * INQUIRY with the EVPD bit set.
 */
static int
ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_inquiry *cdb;
	int alloc_len, retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	switch (cdb->page_code) {
	case SVPD_SUPPORTED_PAGES:
		retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
		break;
	case SVPD_UNIT_SERIAL_NUMBER:
		retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
		break;
	case SVPD_DEVICE_ID:
		retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
		break;
	case SVPD_EXTENDED_INQUIRY_DATA:
		retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
		break;
	case SVPD_MODE_PAGE_POLICY:
		retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
		break;
	case SVPD_SCSI_PORTS:
		retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
		break;
	case SVPD_SCSI_TPC:
		retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
		break;
	case SVPD_BLOCK_LIMITS:
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
		break;
	case SVPD_BDC:
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
		break;
	case SVPD_LBP:
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
		break;
	default:
err:
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}
/*
 * Standard INQUIRY data.
 */
static int
ctl_inquiry_std(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry_data *inq_ptr;
	struct scsi_inquiry *cdb;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	char *val;
	uint32_t alloc_len, data_len;
	ctl_port_type port_type;

	softc = control_softc;

	/*
	 * Figure out whether we're talking to a Fibre Channel port or not.
	 * We treat the ioctl front end, and any SCSI adapters, as packetized
	 * SCSI front ends.
	 */
	port_type = softc->ctl_ports[
	    ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type;
	if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
		port_type = CTL_PORT_SCSI;

	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	/*
	 * We malloc the full inquiry data size here and fill it
	 * in.  If the user only asks for less, we'll give him
	 * that much.
	 */
	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	/*
	 * If we have a LUN configured, report it as connected.  Otherwise,
	 * report that it is offline or no device is supported, depending
	 * on the value of inquiry_pq_no_lun.
	 *
	 * According to the spec (SPC-4 r34), the peripheral qualifier
	 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
	 *
	 * "A peripheral device having the specified peripheral device type
	 * is not connected to this logical unit. However, the device
	 * server is capable of supporting the specified peripheral device
	 * type on this logical unit."
	 *
	 * According to the same spec, the peripheral qualifier
	 * SID_QUAL_BAD_LU (011b) is used in this scenario:
	 *
	 * "The device server is not capable of supporting a peripheral
	 * device on this logical unit. For this peripheral qualifier the
	 * peripheral device type shall be set to 1Fh. All other peripheral
	 * device type values are reserved for this peripheral qualifier."
	 *
	 * Given the text, it would seem that we probably want to report that
	 * the LUN is offline here.  There is no LUN connected, but we can
	 * support a LUN at the given LUN number.
	 *
	 * In the real world, though, it sounds like things are a little
	 * different:
	 *
	 * - Linux, when presented with a LUN with the offline peripheral
	 *   qualifier, will create an sg driver instance for it.  So when
	 *   you attach it to CTL, you wind up with a ton of sg driver
	 *   instances.  (One for every LUN that Linux bothered to probe.)
	 *   Linux does this despite the fact that it issues a REPORT LUNS
	 *   to LUN 0 to get the inventory of supported LUNs.
	 *
	 * - There is other anecdotal evidence (from Emulex folks) about
	 *   arrays that use the offline peripheral qualifier for LUNs that
	 *   are on the "passive" path in an active/passive array.
	 *
	 * So the solution is to provide a hopefully reasonable default
	 * (return bad/no LUN) and allow the user to change the behavior
	 * with a tunable/sysctl variable.
	 */
	if (lun != NULL)
		inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else if (softc->inquiry_pq_no_lun == 0)
		inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC4;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.  The HISUP bit shall be set to one in the
	 * standard INQUIRY data (see SPC-2) when any logical unit number
	 * format described in this standard is used.  Non-hierarchical
	 * formats are outside the scope of this standard.
	 *
	 * Therefore we set the HiSup bit here.
	 *
	 * The response format is 2, per SPC-3.
	 */
	inq_ptr->response_format = SID_HiSup | 2;

	inq_ptr->additional_length = data_len -
	    (offsetof(struct scsi_inquiry_data, additional_length) + 1);
	CTL_DEBUG_PRINT(("additional_length = %d\n",
	    inq_ptr->additional_length));

	inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
	/* 16 bit addressing */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
	/* XXX set the SID_MultiP bit here if we're actually going to
	   respond on multiple ports */
	inq_ptr->spc2_flags |= SPC2_SID_MultiP;

	/* 16 bit data bus, synchronous transfers */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->flags = SID_WBus16 | SID_Sync;
	/*
	 * XXX KDM do we want to support tagged queueing on the control
	 * device at all?
	 */
	if ((lun == NULL)
	 || (lun->be_lun->lun_type != T_PROCESSOR))
		inq_ptr->flags |= SID_CmdQue;

	/*
	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
	 * We have 8 bytes for the vendor name, and 16 bytes for the device
	 * name and 4 bytes for the revision.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "vendor")) == NULL) {
		strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
	} else {
		memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
		strncpy(inq_ptr->vendor, val,
		    min(sizeof(inq_ptr->vendor), strlen(val)));
	}
	if (lun == NULL) {
		strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
		    sizeof(inq_ptr->product));
	} else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_PROCESSOR:
			strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		default:
			strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		}
	} else {
		memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
		strncpy(inq_ptr->product, val,
		    min(sizeof(inq_ptr->product), strlen(val)));
	}

	/*
	 * XXX make this a macro somewhere so it automatically gets
	 * incremented when we make changes.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "revision")) == NULL) {
		strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
	} else {
		memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
		strncpy(inq_ptr->revision, val,
		    min(sizeof(inq_ptr->revision), strlen(val)));
	}

	/*
	 * For parallel SCSI, we support double transition and single
	 * transition clocking.  We also support QAS (Quick Arbitration
	 * and Selection) and Information Unit transfers on both the
	 * control and array devices.
	 */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
				    SID_SPI_IUS;

	/* SAM-5 (no version claimed) */
	scsi_ulto2b(0x00A0, inq_ptr->version1);
	/* SPC-4 (no version claimed) */
	scsi_ulto2b(0x0460, inq_ptr->version2);
	if (port_type == CTL_PORT_FC) {
		/* FCP-2 ANSI INCITS.350:2003 */
		scsi_ulto2b(0x0917, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SCSI) {
		/* SPI-4 ANSI INCITS.362:200x */
		scsi_ulto2b(0x0B56, inq_ptr->version3);
	} else if (port_type == CTL_PORT_ISCSI) {
		/* iSCSI (no version claimed) */
		scsi_ulto2b(0x0960, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SAS) {
		/* SAS (no version claimed) */
		scsi_ulto2b(0x0BE0, inq_ptr->version3);
	}

	if (lun == NULL) {
		/* SBC-4 (no version claimed) */
		scsi_ulto2b(0x0600, inq_ptr->version4);
	} else {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			/* SBC-4 (no version claimed) */
			scsi_ulto2b(0x0600, inq_ptr->version4);
			break;
		case T_PROCESSOR:
		default:
			break;
		}
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
int
ctl_inquiry(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry *cdb;
	int retval;

	CTL_DEBUG_PRINT(("ctl_inquiry\n"));

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	if (cdb->byte2 & SI_EVPD)
		retval = ctl_inquiry_evpd(ctsio);
	else if (cdb->page_code == 0)
		retval = ctl_inquiry_std(ctsio);
	else {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
		    /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	return (retval);
}
/*
 * For known CDB types, parse the LBA and length.
 */
static int
ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
{
	if (io->io_hdr.io_type != CTL_IO_SCSI)
		return (1);

	switch (io->scsiio.cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = cdb->length;
		break;
	}
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)io->scsiio.cdb;
		*lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		*lba &= 0x1fffff;
		*len = cdb->length;
		break;
	}
	case READ_10:
	case WRITE_10: {
		struct scsi_rw_10 *cdb;

		cdb = (struct scsi_rw_10 *)io->scsiio.cdb;
		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_10: {
		struct scsi_write_verify_10 *cdb;

		cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;
		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12: {
		struct scsi_rw_12 *cdb;

		cdb = (struct scsi_rw_12 *)io->scsiio.cdb;
		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_12: {
		struct scsi_write_verify_12 *cdb;

		cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;
		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	case WRITE_ATOMIC_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;
		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)io->scsiio.cdb;
		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)io->scsiio.cdb;
		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_verify_16 *cdb;

		cdb = (struct scsi_verify_16 *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case UNMAP: {
		*lba = 0;
		*len = UINT64_MAX;
		break;
	}
	case SERVICE_ACTION_IN: {	/* GET LBA STATUS */
		struct scsi_get_lba_status *cdb;

		cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = UINT32_MAX;
		break;
	}
	default:
		return (1);
		break; /* NOTREACHED */
	}

	return (0);
}
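/*
 * Example for the READ(6)/WRITE(6) masking above: the 6-byte CDB packs
 * a 21-bit LBA into bytes 1-3, with only the low 5 bits of byte 1
 * valid, so the extracted value is masked with 0x1fffff (2^21 - 1).
 */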
static ctl_action
ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
    bool seq)
{
	uint64_t endlba1, endlba2;

	endlba1 = lba1 + len1 - (seq ? 0 : 1);
	endlba2 = lba2 + len2 - 1;

	if ((endlba1 < lba2) || (endlba2 < lba1))
		return (CTL_ACTION_PASS);
	else
		return (CTL_ACTION_BLOCK);
}
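/*
 * Worked example: lba1 = 0, len1 = 8 gives endlba1 = 7 when seq is
 * false, so a second extent starting at lba2 = 8 passes (adjacent but
 * not overlapping).  With seq true, endlba1 becomes 8 and the same
 * pair blocks, which keeps back-to-back sequential requests ordered.
 */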
static ctl_action
ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
{
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_desc *buf, *end, *range;
	uint64_t lba;
	uint32_t len;

	/* If not UNMAP -- go other way. */
	if (io->io_hdr.io_type != CTL_IO_SCSI ||
	    io->scsiio.cdb[0] != UNMAP)
		return (CTL_ACTION_ERROR);

	/* If UNMAP without data -- block and wait for data. */
	ptrlen = (struct ctl_ptr_len_flags *)
	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
	    ptrlen->ptr == NULL)
		return (CTL_ACTION_BLOCK);

	/* UNMAP with data -- check for collision. */
	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		len = scsi_4btoul(range->length);
		if ((lba < lba2 + len2) && (lba + len > lba2))
			return (CTL_ACTION_BLOCK);
	}
	return (CTL_ACTION_PASS);
}
static ctl_action
ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;
	int retval;

	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	retval = ctl_extent_check_unmap(io1, lba2, len2);
	if (retval != CTL_ACTION_ERROR)
		return (retval);

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);

	return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
}
static ctl_action
ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);
	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	if (lba1 + len1 == lba2)
		return (CTL_ACTION_BLOCK);
	return (CTL_ACTION_PASS);
}
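/*
 * In other words, this check only reports CTL_ACTION_BLOCK when io2
 * begins exactly where io1 ends (lba1 + len1 == lba2), i.e. the two
 * requests form one sequential stream whose order must be preserved.
 */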
static ctl_action
ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *ooa_io)
{
	const struct ctl_cmd_entry *pending_entry, *ooa_entry;
	ctl_serialize_action *serialize_row;

	/*
	 * The initiator attempted multiple untagged commands at the same
	 * time.  Can't do that.
	 */
	if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid.id ==
	      ooa_io->io_hdr.nexus.initid.id))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP);

	/*
	 * The initiator attempted to send multiple tagged commands with
	 * the same ID.  (It's fine if different initiators have the same
	 * tag ID.)
	 *
	 * Even if all of those conditions are true, we don't kill the I/O
	 * if the command ahead of us has been aborted.  We won't end up
	 * sending it to the FETD, and it's perfectly legal to resend a
	 * command with the same tag number as long as the previous
	 * instance of this tag number has been aborted somehow.
	 */
	if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid.id ==
	      ooa_io->io_hdr.nexus.initid.id))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP_TAG);

	/*
	 * If we get a head of queue tag, SAM-3 says that we should
	 * immediately execute it.
	 *
	 * What happens if this command would normally block for some other
	 * reason?  e.g. a request sense with a head of queue tag
	 * immediately after a write.  Normally that would block, but this
	 * will result in its getting executed immediately...
	 *
	 * We currently return "pass" instead of "skip", so we'll end up
	 * going through the rest of the queue to check for overlapped tags.
	 *
	 * XXX KDM check for other types of blockage first??
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
		return (CTL_ACTION_PASS);

	/*
	 * Ordered tags have to block until all items ahead of them
	 * have completed.  If we get called with an ordered tag, we always
	 * block, if something else is ahead of us in the queue.
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
		return (CTL_ACTION_BLOCK);

	/*
	 * Simple tags get blocked until all head of queue and ordered tags
	 * ahead of them have completed.  I'm lumping untagged commands in
	 * with simple tags here.  XXX KDM is that the right thing to do?
	 */
	if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	  || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
	 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
		return (CTL_ACTION_BLOCK);

	pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
	ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);

	serialize_row = ctl_serialize_table[ooa_entry->seridx];

	switch (serialize_row[pending_entry->seridx]) {
	case CTL_SER_BLOCK:
		return (CTL_ACTION_BLOCK);
	case CTL_SER_EXTENT:
		return (ctl_extent_check(ooa_io, pending_io,
		    (lun->serseq == CTL_LUN_SERSEQ_ON)));
	case CTL_SER_EXTENTOPT:
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (ctl_extent_check(ooa_io, pending_io,
			    (lun->serseq == CTL_LUN_SERSEQ_ON)));
		return (CTL_ACTION_PASS);
	case CTL_SER_EXTENTSEQ:
		if (lun->serseq != CTL_LUN_SERSEQ_OFF)
			return (ctl_extent_check_seq(ooa_io, pending_io));
		return (CTL_ACTION_PASS);
	case CTL_SER_PASS:
		return (CTL_ACTION_PASS);
	case CTL_SER_BLOCKOPT:
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (CTL_ACTION_BLOCK);
		return (CTL_ACTION_PASS);
	case CTL_SER_SKIP:
		return (CTL_ACTION_SKIP);
	default:
		panic("invalid serialization value %d",
		    serialize_row[pending_entry->seridx]);
	}

	return (CTL_ACTION_ERROR);
}
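/*
 * A sketch of how the table lookup above is used: if the command ahead
 * of us (ooa_entry) is a WRITE and the pending command is a READ, the
 * serialization table typically yields CTL_SER_EXTENT, so the decision
 * falls through to ctl_extent_check() and only overlapping LBA ranges
 * block each other.
 */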
/*
 * Check for blockage or overlaps against the OOA (Order Of Arrival)
 * queue.  Assumptions:
 * - pending_io is generally either incoming, or on the blocked queue
 * - starting I/O is the I/O we want to start the check with.
 */
static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *starting_io)
{
	union ctl_io *ooa_io;
	ctl_action action;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run back along the OOA queue, starting with the current
	 * blocked I/O and going through every I/O before it on the
	 * queue.  If starting_io is NULL, we'll just end up returning
	 * CTL_ACTION_PASS.
	 */
	for (ooa_io = starting_io; ooa_io != NULL;
	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
	     ooa_links)) {
		/*
		 * This routine just checks to see whether
		 * cur_blocked is blocked by ooa_io, which is ahead
		 * of it in the queue.  It doesn't queue/dequeue
		 * cur_blocked.
		 */
		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
		switch (action) {
		case CTL_ACTION_BLOCK:
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
		case CTL_ACTION_SKIP:
		case CTL_ACTION_ERROR:
			return (action);
			break; /* NOTREACHED */
		case CTL_ACTION_PASS:
			break;
		default:
			panic("invalid action %d", action);
			break; /* NOTREACHED */
		}
	}

	return (CTL_ACTION_PASS);
}
/*
 * Assumptions:
 * - An I/O has just completed, and has been removed from the per-LUN OOA
 *   queue, so some items on the blocked queue may now be unblocked.
 */
static int
ctl_check_blocked(struct ctl_lun *lun)
{
	union ctl_io *cur_blocked, *next_blocked;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run forward from the head of the blocked queue, checking each
	 * entry against the I/Os prior to it on the OOA queue to see if
	 * there is still any blockage.
	 *
	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
	 * with our removing a variable on it while it is traversing the
	 * list.
	 */
	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
	     cur_blocked != NULL; cur_blocked = next_blocked) {
		union ctl_io *prev_ooa;
		ctl_action action;

		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
		    blocked_links);
		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
		    ctl_ooaq, ooa_links);

		/*
		 * If cur_blocked happens to be the first item in the OOA
		 * queue now, prev_ooa will be NULL, and the action
		 * returned will just be CTL_ACTION_PASS.
		 */
		action = ctl_check_ooa(lun, cur_blocked, prev_ooa);

		switch (action) {
		case CTL_ACTION_BLOCK:
			/* Nothing to do here, still blocked */
			break;
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
			/*
			 * This shouldn't happen!  In theory we've already
			 * checked this command for overlap...
			 */
			break;
		case CTL_ACTION_PASS:
		case CTL_ACTION_SKIP: {
			const struct ctl_cmd_entry *entry;
			int isc_retval;

			/*
			 * The skip case shouldn't happen, this transaction
			 * should have never made it onto the blocked queue.
			 *
			 * This I/O is no longer blocked, we can remove it
			 * from the blocked queue.  Since this is a TAILQ
			 * (doubly linked list), we can do O(1) removals
			 * from any place on the list.
			 */
			TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
			    blocked_links);
			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;

			if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
				/*
				 * Need to send IO back to original side to
				 * run.
				 */
				union ctl_ha_msg msg_info;

				msg_info.hdr.original_sc =
				    cur_blocked->io_hdr.original_sc;
				msg_info.hdr.serializing_sc = cur_blocked;
				msg_info.hdr.msg_type = CTL_MSG_R2R;
				if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
				    &msg_info, sizeof(msg_info), 0)) >
				    CTL_HA_STATUS_SUCCESS) {
					printf("CTL:Check Blocked error from "
					    "ctl_ha_msg_send %d\n",
					    isc_retval);
				}
				break;
			}
			entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);

			/*
			 * Check this I/O for LUN state changes that may
			 * have happened while this command was blocked.
			 * The LUN state may have been changed by a command
			 * ahead of us in the queue, so we need to re-check
			 * for any states that can be caused by SCSI
			 * commands.
			 */
			if (ctl_scsiio_lun_check(lun, entry,
			    &cur_blocked->scsiio) == 0) {
				cur_blocked->io_hdr.flags |=
				    CTL_FLAG_IS_WAS_ON_RTR;
				ctl_enqueue_rtr(cur_blocked);
			} else
				ctl_done(cur_blocked);
			break;
		}
		default:
			/*
			 * This probably shouldn't happen -- we shouldn't
			 * get CTL_ACTION_ERROR, or anything else.
			 */
			break;
		}
	}

	return (CTL_RETVAL_COMPLETE);
}
11241 * This routine (with one exception) checks LUN flags that can be set by
11242 * commands ahead of us in the OOA queue. These flags have to be checked
11243 * when a command initially comes in, and when we pull a command off the
11244 * blocked queue and are preparing to execute it. The reason we have to
11245 * check these flags for commands on the blocked queue is that the LUN
11246 * state may have been changed by a command ahead of us while we're on the
11249 * Ordering is somewhat important with these checks, so please pay
11250 * careful attention to the placement of any new checks.
11253 ctl_scsiio_lun_check(struct ctl_lun *lun,
11254 const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
11256 struct ctl_softc *softc = lun->ctl_softc;
11262 mtx_assert(&lun->lun_lock, MA_OWNED);
11265 * If this shelf is a secondary shelf controller, we have to reject
11266 * any media access commands.
11268 if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 &&
11269 (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) {
11270 ctl_set_lun_standby(ctsio);
11275 if (entry->pattern & CTL_LUN_PAT_WRITE) {
11276 if (lun->flags & CTL_LUN_READONLY) {
11277 ctl_set_sense(ctsio, /*current_error*/ 1,
11278 /*sense_key*/ SSD_KEY_DATA_PROTECT,
11279 /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE);
11283 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
11284 .eca_and_aen & SCP_SWP) != 0) {
11285 ctl_set_sense(ctsio, /*current_error*/ 1,
11286 /*sense_key*/ SSD_KEY_DATA_PROTECT,
11287 /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
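		/*
		 * For reference (standard SPC assignments, not defined
		 * in this file): ASC/ASCQ 0x27/0x01 is "hardware write
		 * protected" and 0x27/0x02 is "logical unit software
		 * write protected".
		 */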
11294 * Check for a reservation conflict. If this command isn't allowed
11295 * even on reserved LUNs, and if this initiator isn't the one who
11296 * reserved us, reject the command with a reservation conflict.
11298 residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
11299 if ((lun->flags & CTL_LUN_RESERVED)
11300 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
11301 if (lun->res_idx != residx) {
11302 ctl_set_reservation_conflict(ctsio);
11308 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
11309 (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
11310 /* No reservation or command is allowed. */;
11311 } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
11312 (lun->res_type == SPR_TYPE_WR_EX ||
11313 lun->res_type == SPR_TYPE_WR_EX_RO ||
11314 lun->res_type == SPR_TYPE_WR_EX_AR)) {
11315 /* The command is allowed for Write Exclusive resv. */;
11318 * If we aren't registered, or it's a holder-type reservation
11319 * and this isn't the reservation holder, set a reservation conflict.
11322 if (ctl_get_prkey(lun, residx) == 0
11323 || (residx != lun->pr_res_idx && lun->res_type < 4)) {
11324 ctl_set_reservation_conflict(ctsio);
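			/*
			 * Note: res_type < 4 covers the single-holder
			 * reservation types (assuming the standard SPC
			 * codes, SPR_TYPE_WR_EX = 1 and
			 * SPR_TYPE_EX_AC = 3); for registrants-only and
			 * all-registrants types a registered initiator
			 * passes the prkey check above.
			 */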
11331 if ((lun->flags & CTL_LUN_OFFLINE)
11332 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
11333 ctl_set_lun_not_ready(ctsio);
11339 * If the LUN is stopped, see if this particular command is allowed
11340 * for a stopped lun. Otherwise, reject it with 0x04,0x02.
11342 if ((lun->flags & CTL_LUN_STOPPED)
11343 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
11344 /* "Logical unit not ready, initializing cmd. required" */
11345 ctl_set_lun_stopped(ctsio);
11350 if ((lun->flags & CTL_LUN_INOPERABLE)
11351 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
11352 /* "Medium format corrupted" */
11353 ctl_set_medium_format_corrupted(ctsio);
11364 ctl_failover_io(union ctl_io *io, int have_lock)
11366 ctl_set_busy(&io->scsiio);
11373 struct ctl_lun *lun;
11374 struct ctl_softc *softc;
11375 union ctl_io *next_io, *pending_io;
11379 softc = control_softc;
11381 mtx_lock(&softc->ctl_lock);
11383 * Remove any cmds from the other SC from the rtr queue. These
11384 * will obviously only be for LUNs for which we're the primary.
11385 * We can't send status or get/send data for these commands.
11386 * Since they haven't been executed yet, we can just remove them.
11387 * We'll either abort them or delete them below, depending on
11388 * which HA mode we're in.
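 *
 * A sketch of the four cases handled in the loop below: primary LUN +
 * SER_ONLY drops the peer's queued commands, primary + XFER marks
 * them aborted, secondary + XFER takes over and completes the peer's
 * active I/O with BUSY, and secondary + SER_ONLY takes over and
 * re-serializes the OOA queue.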
11391 mtx_lock(&softc->queue_lock);
11392 for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
11393 io != NULL; io = next_io) {
11394 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
11395 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
11396 STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
11397 ctl_io_hdr, links);
11399 mtx_unlock(&softc->queue_lock);
11402 for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) {
11403 lun = softc->ctl_luns[lun_idx];
11408 * Processor LUNs are primary on both sides.
11409 * XXX will this always be true?
11411 if (lun->be_lun->lun_type == T_PROCESSOR)
11414 if ((lun->flags & CTL_LUN_PRIMARY_SC)
11415 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
11416 printf("FAILOVER: primary lun %d\n", lun_idx);
11418 * Remove all commands from the other SC: first from the
11419 * blocked queue, then from the OOA queue. Once we have
11420 * removed them, call ctl_check_blocked() to see if there
11421 * is anything that can run.
11423 for (io = (union ctl_io *)TAILQ_FIRST(
11424 &lun->blocked_queue); io != NULL; io = next_io) {
11426 next_io = (union ctl_io *)TAILQ_NEXT(
11427 &io->io_hdr, blocked_links);
11429 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
11430 TAILQ_REMOVE(&lun->blocked_queue,
11431 &io->io_hdr, blocked_links);
11432 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
11433 TAILQ_REMOVE(&lun->ooa_queue,
11434 &io->io_hdr, ooa_links);
11440 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
11441 io != NULL; io = next_io) {
11443 next_io = (union ctl_io *)TAILQ_NEXT(
11444 &io->io_hdr, ooa_links);
11446 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
11448 TAILQ_REMOVE(&lun->ooa_queue,
11455 ctl_check_blocked(lun);
11456 } else if ((lun->flags & CTL_LUN_PRIMARY_SC)
11457 && (softc->ha_mode == CTL_HA_MODE_XFER)) {
11459 printf("FAILOVER: primary lun %d\n", lun_idx);
11461 * Abort all commands from the other SC. We can't
11462 * send status back for them now. These should get
11463 * cleaned up when they are completed or come out
11464 * for a datamove operation.
11466 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
11467 io != NULL; io = next_io) {
11468 next_io = (union ctl_io *)TAILQ_NEXT(
11469 &io->io_hdr, ooa_links);
11471 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
11472 io->io_hdr.flags |= CTL_FLAG_ABORT;
11474 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
11475 && (softc->ha_mode == CTL_HA_MODE_XFER)) {
11477 printf("FAILOVER: secondary lun %d\n", lun_idx);
11479 lun->flags |= CTL_LUN_PRIMARY_SC;
11482 * We send all I/O that was sent to this controller
11483 * and redirected to the other side back with
11484 * busy status, and have the initiator retry it.
11485 * Figuring out how much data has been transferred,
11486 * etc. and picking up where we left off would be very difficult.
11489 * XXX KDM need to remove I/O from the blocked queue as well.
11492 for (pending_io = (union ctl_io *)TAILQ_FIRST(
11493 &lun->ooa_queue); pending_io != NULL;
11494 pending_io = next_io) {
11496 next_io = (union ctl_io *)TAILQ_NEXT(
11497 &pending_io->io_hdr, ooa_links);
11499 pending_io->io_hdr.flags &=
11500 ~CTL_FLAG_SENT_2OTHER_SC;
11502 if (pending_io->io_hdr.flags &
11503 CTL_FLAG_IO_ACTIVE) {
11504 pending_io->io_hdr.flags |=
11507 ctl_set_busy(&pending_io->scsiio);
11508 ctl_done(pending_io);
11512 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
11513 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
11514 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
11515 printf("FAILOVER: secondary lun %d\n", lun_idx);
11517 * If the first I/O on the OOA queue is not on the RtR queue, put it there.
11520 lun->flags |= CTL_LUN_PRIMARY_SC;
11522 pending_io = (union ctl_io *)TAILQ_FIRST(
11524 if (pending_io == NULL) {
11525 printf("Nothing on OOA queue\n");
11529 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
11530 if ((pending_io->io_hdr.flags &
11531 CTL_FLAG_IS_WAS_ON_RTR) == 0) {
11532 pending_io->io_hdr.flags |=
11533 CTL_FLAG_IS_WAS_ON_RTR;
11534 ctl_enqueue_rtr(pending_io);
11539 printf("Tag 0x%04x is running\n",
11540 pending_io->scsiio.tag_num);
11544 next_io = (union ctl_io *)TAILQ_NEXT(
11545 &pending_io->io_hdr, ooa_links);
11546 for (pending_io = next_io; pending_io != NULL;
11547 pending_io = next_io) {
11548 pending_io->io_hdr.flags &=
11549 ~CTL_FLAG_SENT_2OTHER_SC;
11550 next_io = (union ctl_io *)TAILQ_NEXT(
11551 &pending_io->io_hdr, ooa_links);
11552 if (pending_io->io_hdr.flags &
11553 CTL_FLAG_IS_WAS_ON_RTR) {
11555 printf("Tag 0x%04x is running\n",
11556 pending_io->scsiio.tag_num);
11561 switch (ctl_check_ooa(lun, pending_io,
11562 (union ctl_io *)TAILQ_PREV(
11563 &pending_io->io_hdr, ctl_ooaq,
11566 case CTL_ACTION_BLOCK:
11567 TAILQ_INSERT_TAIL(&lun->blocked_queue,
11568 &pending_io->io_hdr,
11570 pending_io->io_hdr.flags |=
11573 case CTL_ACTION_PASS:
11574 case CTL_ACTION_SKIP:
11575 pending_io->io_hdr.flags |=
11576 CTL_FLAG_IS_WAS_ON_RTR;
11577 ctl_enqueue_rtr(pending_io);
11579 case CTL_ACTION_OVERLAP:
11580 ctl_set_overlapped_cmd(
11581 (struct ctl_scsiio *)pending_io);
11582 ctl_done(pending_io);
11584 case CTL_ACTION_OVERLAP_TAG:
11585 ctl_set_overlapped_tag(
11586 (struct ctl_scsiio *)pending_io,
11587 pending_io->scsiio.tag_num & 0xff);
11588 ctl_done(pending_io);
11590 case CTL_ACTION_ERROR:
11592 ctl_set_internal_failure(
11593 (struct ctl_scsiio *)pending_io,
11596 ctl_done(pending_io);
11601 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
11603 panic("Unhandled HA mode failover, LUN flags = %#x, "
11604 "ha_mode = #%x", lun->flags, softc->ha_mode);
11608 mtx_unlock(&softc->ctl_lock);
11612 ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx,
11613 ctl_ua_type ua_type)
11615 struct ctl_lun *lun;
11618 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);
11620 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
11621 mtx_lock(&lun->lun_lock);
11622 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
11624 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua_type;
11625 mtx_unlock(&lun->lun_lock);
11630 ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
11632 struct ctl_lun *lun;
11633 const struct ctl_cmd_entry *entry;
11634 uint32_t initidx, targ_lun;
11641 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
11642 if ((targ_lun < CTL_MAX_LUNS)
11643 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
11645 * If the LUN is invalid, pretend that it doesn't exist.
11646 * It will go away as soon as all pending I/O has been completed.
11649 mtx_lock(&lun->lun_lock);
11650 if (lun->flags & CTL_LUN_DISABLED) {
11651 mtx_unlock(&lun->lun_lock);
11653 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
11654 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
11656 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
11657 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
11659 if (lun->be_lun->lun_type == T_PROCESSOR) {
11660 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
11664 * Every I/O goes into the OOA queue for a
11665 * particular LUN, and stays there until completion.
11668 if (TAILQ_EMPTY(&lun->ooa_queue)) {
11669 lun->idle_time += getsbinuptime() -
11673 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
11677 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
11678 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
11681 /* Get command entry and return error if it is unsupported. */
11682 entry = ctl_validate_command(ctsio);
11683 if (entry == NULL) {
11685 mtx_unlock(&lun->lun_lock);
11689 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
11690 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;
11693 * Check to see whether we can send this command to LUNs that don't
11694 * exist. This should pretty much only be the case for inquiry
11695 * and request sense. The further checks below really require having
11696 * a LUN, so we can't check the command any further here. Just put
11697 * it on the rtr queue.
11700 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) {
11701 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11702 ctl_enqueue_rtr((union ctl_io *)ctsio);
11706 ctl_set_unsupported_lun(ctsio);
11707 ctl_done((union ctl_io *)ctsio);
11708 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
11712 * Make sure we support this particular command on this LUN.
11713 * e.g., we don't support writes to the control LUN.
11715 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
11716 mtx_unlock(&lun->lun_lock);
11717 ctl_set_invalid_opcode(ctsio);
11718 ctl_done((union ctl_io *)ctsio);
11723 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
11727 * If we've got a request sense, it'll clear the contingent
11728 * allegiance condition. Otherwise, if we have a CA condition for
11729 * this initiator, clear it, because it sent down a command other
11730 * than request sense.
11732 if ((ctsio->cdb[0] != REQUEST_SENSE)
11733 && (ctl_is_set(lun->have_ca, initidx)))
11734 ctl_clear_mask(lun->have_ca, initidx);
11738 * If the command has this flag set, it handles its own unit
11739 * attention reporting, so we shouldn't do anything. Otherwise we
11740 * check for any pending unit attentions, and send them back to the
11741 * initiator. We only do this when a command initially comes in,
11742 * not when we pull it off the blocked queue.
11744 * According to SAM-3, section 5.3.2, the order that things get
11745 * presented back to the host is basically unit attentions caused
11746 * by some sort of reset event, busy status, reservation conflicts
11747 * or task set full, and finally any other status.
11749 * One issue here is that some of the unit attentions we report
11750 * don't fall into the "reset" category (e.g. "reported luns data
11751 * has changed"). So reporting it here, before the reservation
11752 * check, may be technically wrong. I guess the only thing to do
11753 * would be to check for and report the reset events here, and then
11754 * check for the other unit attention types after we check for a
11755 * reservation conflict.
11757 * XXX KDM need to fix this
11759 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
11760 ctl_ua_type ua_type;
11761 scsi_sense_data_type sense_format;
11763 if (lun->flags & CTL_LUN_SENSE_DESC)
11764 sense_format = SSD_TYPE_DESC;
11766 sense_format = SSD_TYPE_FIXED;
11768 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
11770 if (ua_type != CTL_UA_NONE) {
11771 mtx_unlock(&lun->lun_lock);
11772 ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
11773 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
11774 ctsio->sense_len = SSD_FULL_SIZE;
11775 ctl_done((union ctl_io *)ctsio);
11781 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
11782 mtx_unlock(&lun->lun_lock);
11783 ctl_done((union ctl_io *)ctsio);
11788 * XXX CHD this is where we want to send IO to other side if
11789 * this LUN is secondary on this SC. We will need to make a copy
11790 * of the IO and flag the IO on this side as SENT_2OTHER and flag
11791 * the copy we send as FROM_OTHER.
11792 * We also need to stuff the address of the original IO so we can
11793 * find it easily. Something similar will need to be done on the other
11794 * side so when we are done we can find the copy.
11796 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
11797 union ctl_ha_msg msg_info;
11800 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
11802 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
11803 msg_info.hdr.original_sc = (union ctl_io *)ctsio;
11805 printf("1. ctsio %p\n", ctsio);
11807 msg_info.hdr.serializing_sc = NULL;
11808 msg_info.hdr.nexus = ctsio->io_hdr.nexus;
11809 msg_info.scsi.tag_num = ctsio->tag_num;
11810 msg_info.scsi.tag_type = ctsio->tag_type;
11811 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);
11813 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
11815 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11816 (void *)&msg_info, sizeof(msg_info), 0)) >
11817 CTL_HA_STATUS_SUCCESS) {
11818 printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
11820 printf("CTL:opcode is %x\n", ctsio->cdb[0]);
11823 printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
11828 * XXX KDM this I/O is off the incoming queue, but hasn't
11829 * been inserted on any other queue. We may need to come
11830 * up with a holding queue while we wait for serialization
11831 * so that we have an idea of what we're waiting for from the other side.
11834 mtx_unlock(&lun->lun_lock);
11838 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
11839 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
11840 ctl_ooaq, ooa_links))) {
11841 case CTL_ACTION_BLOCK:
11842 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
11843 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
11845 mtx_unlock(&lun->lun_lock);
11847 case CTL_ACTION_PASS:
11848 case CTL_ACTION_SKIP:
11849 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
11850 mtx_unlock(&lun->lun_lock);
11851 ctl_enqueue_rtr((union ctl_io *)ctsio);
11853 case CTL_ACTION_OVERLAP:
11854 mtx_unlock(&lun->lun_lock);
11855 ctl_set_overlapped_cmd(ctsio);
11856 ctl_done((union ctl_io *)ctsio);
11858 case CTL_ACTION_OVERLAP_TAG:
11859 mtx_unlock(&lun->lun_lock);
11860 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
11861 ctl_done((union ctl_io *)ctsio);
11863 case CTL_ACTION_ERROR:
11865 mtx_unlock(&lun->lun_lock);
11866 ctl_set_internal_failure(ctsio,
11868 /*retry_count*/ 0);
11869 ctl_done((union ctl_io *)ctsio);
11875 const struct ctl_cmd_entry *
11876 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
11878 const struct ctl_cmd_entry *entry;
11879 int service_action;
11881 entry = &ctl_cmd_table[ctsio->cdb[0]];
11883 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
11884 if (entry->flags & CTL_CMD_FLAG_SA5) {
11885 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
11886 entry = &((const struct ctl_cmd_entry *)
11887 entry->execute)[service_action];
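	/*
	 * Illustrative example (assuming the standard SCSI opcode
	 * layout): for a SERVICE ACTION IN(16) CDB, cdb[0] selects the
	 * top-level entry, which has CTL_CMD_FLAG_SA5 set, and the
	 * service action code from cdb[1] then indexes the
	 * second-level table hung off entry->execute, selecting e.g.
	 * READ CAPACITY(16).
	 */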
11892 const struct ctl_cmd_entry *
11893 ctl_validate_command(struct ctl_scsiio *ctsio)
11895 const struct ctl_cmd_entry *entry;
11899 entry = ctl_get_cmd_entry(ctsio, &sa);
11900 if (entry->execute == NULL) {
11902 ctl_set_invalid_field(ctsio,
11909 ctl_set_invalid_opcode(ctsio);
11910 ctl_done((union ctl_io *)ctsio);
11913 KASSERT(entry->length > 0,
11914 ("Not defined length for command 0x%02x/0x%02x",
11915 ctsio->cdb[0], ctsio->cdb[1]));
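	/*
	 * The usage[] mask marks the CDB bits that are legal for this
	 * command; any bit set in the CDB but clear in the mask is an
	 * illegal field. Worked example: cdb[i] = 0x1f against a mask
	 * of 0x07 leaves diff = 0x18, and fls(diff) - 1 = 4 reports
	 * the highest offending bit in the sense data below.
	 */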
11916 for (i = 1; i < entry->length; i++) {
11917 diff = ctsio->cdb[i] & ~entry->usage[i - 1];
11920 ctl_set_invalid_field(ctsio,
11925 /*bit*/ fls(diff) - 1);
11926 ctl_done((union ctl_io *)ctsio);
11933 ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
11936 switch (lun_type) {
11938 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) &&
11939 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
11943 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) &&
11944 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
11954 ctl_scsiio(struct ctl_scsiio *ctsio)
11957 const struct ctl_cmd_entry *entry;
11959 retval = CTL_RETVAL_COMPLETE;
11961 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));
11963 entry = ctl_get_cmd_entry(ctsio, NULL);
11966 * If this I/O has been aborted, just send it straight to
11967 * ctl_done() without executing it.
11969 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
11970 ctl_done((union ctl_io *)ctsio);
11975 * All the checks should have been handled by ctl_scsiio_precheck().
11976 * We should be clear now to just execute the I/O.
11978 retval = entry->execute(ctsio);
11985 * Since we only implement one target right now, a bus reset simply resets
11986 * our single target.
11989 ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io)
11991 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET));
11995 ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
11996 ctl_ua_type ua_type)
11998 struct ctl_lun *lun;
12001 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12002 union ctl_ha_msg msg_info;
12004 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
12005 msg_info.hdr.nexus = io->io_hdr.nexus;
12006 if (ua_type == CTL_UA_TARG_RESET)
12007 msg_info.task.task_action = CTL_TASK_TARGET_RESET;
12009 msg_info.task.task_action = CTL_TASK_BUS_RESET;
12010 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12011 msg_info.hdr.original_sc = NULL;
12012 msg_info.hdr.serializing_sc = NULL;
12013 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
12014 (void *)&msg_info, sizeof(msg_info), 0)) {
12019 mtx_lock(&softc->ctl_lock);
12020 STAILQ_FOREACH(lun, &softc->lun_list, links)
12021 retval += ctl_lun_reset(lun, io, ua_type);
12022 mtx_unlock(&softc->ctl_lock);
12028 * The LUN should always be set. The I/O is optional, and is used to
12029 * distinguish between I/Os sent by this initiator, and by other
12030 * initiators. We set unit attention for initiators other than this one.
12031 * SAM-3 is vague on this point. It does say that a unit attention should
12032 * be established for other initiators when a LUN is reset (see section
12033 * 5.7.3), but it doesn't specifically say that the unit attention should
12034 * be established for this particular initiator when a LUN is reset. Here
12035 * is the relevant text, from SAM-3 rev 8:
12037 * 5.7.2 When a SCSI initiator port aborts its own tasks
12039 * When a SCSI initiator port causes its own task(s) to be aborted, no
12040 * notification that the task(s) have been aborted shall be returned to
12041 * the SCSI initiator port other than the completion response for the
12042 * command or task management function action that caused the task(s) to
12043 * be aborted and notification(s) associated with related effects of the
12044 * action (e.g., a reset unit attention condition).
12046 * XXX KDM for now, we're setting unit attention for all initiators.
12049 ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
12059 mtx_lock(&lun->lun_lock);
12061 * Run through the OOA queue and abort each I/O.
12063 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12064 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12065 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
12069 * This version sets unit attention for every
12072 initidx = ctl_get_initindex(&io->io_hdr.nexus);
12073 ctl_est_ua_all(lun, initidx, ua_type);
12075 ctl_est_ua_all(lun, -1, ua_type);
12079 * A reset (any kind, really) clears reservations established with
12080 * RESERVE/RELEASE. It does not clear reservations established
12081 * with PERSISTENT RESERVE OUT, but we don't support that at the
12082 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address
12083 * reservations made with the RESERVE/RELEASE commands, because
12084 * those commands are obsolete in SPC-3.
12086 lun->flags &= ~CTL_LUN_RESERVED;
12089 for (i = 0; i < CTL_MAX_INITIATORS; i++)
12090 ctl_clear_mask(lun->have_ca, i);
12092 mtx_unlock(&lun->lun_lock);
12098 ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
12103 mtx_assert(&lun->lun_lock, MA_OWNED);
12106 * Run through the OOA queue and attempt to find the given I/O.
12107 * The target port, initiator ID, tag type and tag number have to
12108 * match the values that we got from the initiator. If we have an
12109 * untagged command to abort, simply abort the first untagged command
12110 * we come to. We only allow one untagged command at a time of course.
12112 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12113 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12115 if ((targ_port == UINT32_MAX ||
12116 targ_port == xio->io_hdr.nexus.targ_port) &&
12117 (init_id == UINT32_MAX ||
12118 init_id == xio->io_hdr.nexus.initid.id)) {
12119 if (targ_port != xio->io_hdr.nexus.targ_port ||
12120 init_id != xio->io_hdr.nexus.initid.id)
12121 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
12122 xio->io_hdr.flags |= CTL_FLAG_ABORT;
12123 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12124 union ctl_ha_msg msg_info;
12126 msg_info.hdr.nexus = xio->io_hdr.nexus;
12127 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
12128 msg_info.task.tag_num = xio->scsiio.tag_num;
12129 msg_info.task.tag_type = xio->scsiio.tag_type;
12130 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12131 msg_info.hdr.original_sc = NULL;
12132 msg_info.hdr.serializing_sc = NULL;
12133 ctl_ha_msg_send(CTL_HA_CHAN_CTL,
12134 (void *)&msg_info, sizeof(msg_info), 0);
12141 ctl_abort_task_set(union ctl_io *io)
12143 struct ctl_softc *softc = control_softc;
12144 struct ctl_lun *lun;
12150 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12151 mtx_lock(&softc->ctl_lock);
12152 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
12153 lun = softc->ctl_luns[targ_lun];
12155 mtx_unlock(&softc->ctl_lock);
12159 mtx_lock(&lun->lun_lock);
12160 mtx_unlock(&softc->ctl_lock);
12161 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
12162 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
12163 io->io_hdr.nexus.initid.id,
12164 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
12165 } else { /* CTL_TASK_CLEAR_TASK_SET */
12166 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
12167 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
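	/*
	 * UINT32_MAX serves as a wildcard in ctl_abort_tasks_lun()
	 * (see the targ_port/init_id matching there), so the CLEAR
	 * TASK SET case aborts every task on the LUN regardless of
	 * which nexus queued it.
	 */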
12169 mtx_unlock(&lun->lun_lock);
12174 ctl_i_t_nexus_reset(union ctl_io *io)
12176 struct ctl_softc *softc = control_softc;
12177 struct ctl_lun *lun;
12178 uint32_t initidx, residx;
12180 initidx = ctl_get_initindex(&io->io_hdr.nexus);
12181 residx = ctl_get_resindex(&io->io_hdr.nexus);
12182 mtx_lock(&softc->ctl_lock);
12183 STAILQ_FOREACH(lun, &softc->lun_list, links) {
12184 mtx_lock(&lun->lun_lock);
12185 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
12186 io->io_hdr.nexus.initid.id,
12187 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
12189 ctl_clear_mask(lun->have_ca, initidx);
12191 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
12192 lun->flags &= ~CTL_LUN_RESERVED;
12193 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS);
12194 mtx_unlock(&lun->lun_lock);
12196 mtx_unlock(&softc->ctl_lock);
12201 ctl_abort_task(union ctl_io *io)
12204 struct ctl_lun *lun;
12205 struct ctl_softc *softc;
12208 char printbuf[128];
12213 softc = control_softc;
12219 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12220 mtx_lock(&softc->ctl_lock);
12221 if ((targ_lun < CTL_MAX_LUNS)
12222 && (softc->ctl_luns[targ_lun] != NULL))
12223 lun = softc->ctl_luns[targ_lun];
12225 mtx_unlock(&softc->ctl_lock);
12230 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
12231 lun->lun, io->taskio.tag_num, io->taskio.tag_type);
12234 mtx_lock(&lun->lun_lock);
12235 mtx_unlock(&softc->ctl_lock);
12237 * Run through the OOA queue and attempt to find the given I/O.
12238 * The target port, initiator ID, tag type and tag number have to
12239 * match the values that we got from the initiator. If we have an
12240 * untagged command to abort, simply abort the first untagged command
12241 * we come to. We only allow one untagged command at a time of course.
12243 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
12244 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
12246 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);
12248 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
12249 lun->lun, xio->scsiio.tag_num,
12250 xio->scsiio.tag_type,
12251 (xio->io_hdr.blocked_links.tqe_prev
12252 == NULL) ? "" : " BLOCKED",
12253 (xio->io_hdr.flags &
12254 CTL_FLAG_DMA_INPROG) ? " DMA" : "",
12255 (xio->io_hdr.flags &
12256 CTL_FLAG_ABORT) ? " ABORT" : "",
12257 (xio->io_hdr.flags &
12258 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
12259 ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
12261 printf("%s\n", sbuf_data(&sb));
12264 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
12265 || (xio->io_hdr.nexus.initid.id != io->io_hdr.nexus.initid.id)
12266 || (xio->io_hdr.flags & CTL_FLAG_ABORT))
12270 * If the abort says that the task is untagged, the
12271 * task in the queue must be untagged. Otherwise,
12272 * we just check to see whether the tag numbers
12273 * match. This is because the QLogic firmware
12274 * doesn't pass back the tag type in an abort
12278 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
12279 && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
12280 || (xio->scsiio.tag_num == io->taskio.tag_num))
12283 * XXX KDM we've got problems with FC, because it
12284 * doesn't send down a tag type with aborts. So we
12285 * can only really go by the tag number...
12286 * This may cause problems with parallel SCSI.
12287 * Need to figure that out!!
12289 if (xio->scsiio.tag_num == io->taskio.tag_num) {
12290 xio->io_hdr.flags |= CTL_FLAG_ABORT;
12292 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
12293 !(lun->flags & CTL_LUN_PRIMARY_SC)) {
12294 union ctl_ha_msg msg_info;
12296 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
12297 msg_info.hdr.nexus = io->io_hdr.nexus;
12298 msg_info.task.task_action = CTL_TASK_ABORT_TASK;
12299 msg_info.task.tag_num = io->taskio.tag_num;
12300 msg_info.task.tag_type = io->taskio.tag_type;
12301 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
12302 msg_info.hdr.original_sc = NULL;
12303 msg_info.hdr.serializing_sc = NULL;
12305 printf("Sent Abort to other side\n");
12307 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL,
12308 (void *)&msg_info, sizeof(msg_info), 0) !=
12309 CTL_HA_STATUS_SUCCESS) {
12313 printf("ctl_abort_task: found I/O to abort\n");
12317 mtx_unlock(&lun->lun_lock);
12321 * This isn't really an error. It's entirely possible for
12322 * the abort and command completion to cross on the wire.
12323 * This is more of an informative/diagnostic message.
12326 printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
12327 "%d:%d:%d:%d tag %d type %d\n",
12328 io->io_hdr.nexus.initid.id,
12329 io->io_hdr.nexus.targ_port,
12330 io->io_hdr.nexus.targ_target.id,
12331 io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
12332 io->taskio.tag_type);
12339 ctl_run_task(union ctl_io *io)
12341 struct ctl_softc *softc = control_softc;
12343 const char *task_desc;
12345 CTL_DEBUG_PRINT(("ctl_run_task\n"));
12347 KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
12348 ("ctl_run_task: Unextected io_type %d\n",
12349 io->io_hdr.io_type));
12351 task_desc = ctl_scsi_task_string(&io->taskio);
12352 if (task_desc != NULL) {
12354 csevent_log(CSC_CTL | CSC_SHELF_SW |
12356 csevent_LogType_Trace,
12357 csevent_Severity_Information,
12358 csevent_AlertLevel_Green,
12359 csevent_FRU_Firmware,
12360 csevent_FRU_Unknown,
12361 "CTL: received task: %s",task_desc);
12365 csevent_log(CSC_CTL | CSC_SHELF_SW |
12367 csevent_LogType_Trace,
12368 csevent_Severity_Information,
12369 csevent_AlertLevel_Green,
12370 csevent_FRU_Firmware,
12371 csevent_FRU_Unknown,
12372 "CTL: received unknown task "
12374 io->taskio.task_action,
12375 io->taskio.task_action);
12378 switch (io->taskio.task_action) {
12379 case CTL_TASK_ABORT_TASK:
12380 retval = ctl_abort_task(io);
12382 case CTL_TASK_ABORT_TASK_SET:
12383 case CTL_TASK_CLEAR_TASK_SET:
12384 retval = ctl_abort_task_set(io);
12386 case CTL_TASK_CLEAR_ACA:
12388 case CTL_TASK_I_T_NEXUS_RESET:
12389 retval = ctl_i_t_nexus_reset(io);
12391 case CTL_TASK_LUN_RESET: {
12392 struct ctl_lun *lun;
12395 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12396 mtx_lock(&softc->ctl_lock);
12397 if ((targ_lun < CTL_MAX_LUNS)
12398 && (softc->ctl_luns[targ_lun] != NULL))
12399 lun = softc->ctl_luns[targ_lun];
12401 mtx_unlock(&softc->ctl_lock);
12406 if (!(io->io_hdr.flags &
12407 CTL_FLAG_FROM_OTHER_SC)) {
12408 union ctl_ha_msg msg_info;
12410 io->io_hdr.flags |=
12411 CTL_FLAG_SENT_2OTHER_SC;
12412 msg_info.hdr.msg_type =
12413 CTL_MSG_MANAGE_TASKS;
12414 msg_info.hdr.nexus = io->io_hdr.nexus;
12415 msg_info.task.task_action =
12416 CTL_TASK_LUN_RESET;
12417 msg_info.hdr.original_sc = NULL;
12418 msg_info.hdr.serializing_sc = NULL;
12419 if (CTL_HA_STATUS_SUCCESS !=
12420 ctl_ha_msg_send(CTL_HA_CHAN_CTL,
12422 sizeof(msg_info), 0)) {
12426 retval = ctl_lun_reset(lun, io,
12428 mtx_unlock(&softc->ctl_lock);
12431 case CTL_TASK_TARGET_RESET:
12432 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
12434 case CTL_TASK_BUS_RESET:
12435 retval = ctl_bus_reset(softc, io);
12437 case CTL_TASK_PORT_LOGIN:
12439 case CTL_TASK_PORT_LOGOUT:
12442 printf("ctl_run_task: got unknown task management event %d\n",
12443 io->taskio.task_action);
12447 io->io_hdr.status = CTL_SUCCESS;
12449 io->io_hdr.status = CTL_ERROR;
12454 * For HA operation: handle commands that come in from the other controller.
12458 ctl_handle_isc(union ctl_io *io)
12461 struct ctl_lun *lun;
12462 struct ctl_softc *softc;
12465 softc = control_softc;
12467 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12468 lun = softc->ctl_luns[targ_lun];
12470 switch (io->io_hdr.msg_type) {
12471 case CTL_MSG_SERIALIZE:
12472 free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
12474 case CTL_MSG_R2R: {
12475 const struct ctl_cmd_entry *entry;
12478 * This is only used in SER_ONLY mode.
12481 entry = ctl_get_cmd_entry(&io->scsiio, NULL);
12482 mtx_lock(&lun->lun_lock);
12483 if (ctl_scsiio_lun_check(lun,
12484 entry, (struct ctl_scsiio *)io) != 0) {
12485 mtx_unlock(&lun->lun_lock);
12489 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
12490 mtx_unlock(&lun->lun_lock);
12491 ctl_enqueue_rtr(io);
12494 case CTL_MSG_FINISH_IO:
12495 if (softc->ha_mode == CTL_HA_MODE_XFER) {
12500 mtx_lock(&lun->lun_lock);
12501 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
12503 ctl_check_blocked(lun);
12504 mtx_unlock(&lun->lun_lock);
12507 case CTL_MSG_PERS_ACTION:
12508 ctl_hndl_per_res_out_on_other_sc(
12509 (union ctl_ha_msg *)&io->presio.pr_msg);
12512 case CTL_MSG_BAD_JUJU:
12516 case CTL_MSG_DATAMOVE:
12517 /* Only used in XFER mode */
12519 ctl_datamove_remote(io);
12521 case CTL_MSG_DATAMOVE_DONE:
12522 /* Only used in XFER mode */
12524 io->scsiio.be_move_done(io);
12528 printf("%s: Invalid message type %d\n",
12529 __func__, io->io_hdr.msg_type);
12539 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
12540 * there is no match.
12542 static ctl_lun_error_pattern
12543 ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
12545 const struct ctl_cmd_entry *entry;
12546 ctl_lun_error_pattern filtered_pattern, pattern;
12548 pattern = desc->error_pattern;
12551 * XXX KDM we need more data passed into this function to match a
12552 * custom pattern, and we actually need to implement custom pattern matching.
12555 if (pattern & CTL_LUN_PAT_CMD)
12556 return (CTL_LUN_PAT_CMD);
12558 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
12559 return (CTL_LUN_PAT_ANY);
12561 entry = ctl_get_cmd_entry(ctsio, NULL);
12563 filtered_pattern = entry->pattern & pattern;
12566 * If the user requested specific flags in the pattern (e.g.
12567 * CTL_LUN_PAT_RANGE), make sure the command supports all of those
12570 * If the user did not specify any flags, it doesn't matter whether
12571 * or not the command supports the flags.
12573 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
12574 (pattern & ~CTL_LUN_PAT_MASK))
12575 return (CTL_LUN_PAT_NONE);
12578 * If the user asked for a range check, see if the requested LBA
12579 * range overlaps with this command's LBA range.
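	 *
	 * Example: a descriptor covering LBAs 100-199 matches a READ
	 * starting at LBA 150, since ctl_extent_check_lba() only
	 * returns "pass" when the two ranges are disjoint.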
12581 if (filtered_pattern & CTL_LUN_PAT_RANGE) {
12587 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
12589 return (CTL_LUN_PAT_NONE);
12591 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
12592 desc->lba_range.len, FALSE);
12594 * A "pass" means that the LBA ranges don't overlap, so
12595 * this doesn't match the user's range criteria.
12597 if (action == CTL_ACTION_PASS)
12598 return (CTL_LUN_PAT_NONE);
12601 return (filtered_pattern);
12605 ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
12607 struct ctl_error_desc *desc, *desc2;
12609 mtx_assert(&lun->lun_lock, MA_OWNED);
12611 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
12612 ctl_lun_error_pattern pattern;
12614 * Check to see whether this particular command matches
12615 * the pattern in the descriptor.
12617 pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
12618 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
12621 switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
12622 case CTL_LUN_INJ_ABORTED:
12623 ctl_set_aborted(&io->scsiio);
12625 case CTL_LUN_INJ_MEDIUM_ERR:
12626 ctl_set_medium_error(&io->scsiio);
12628 case CTL_LUN_INJ_UA:
12629 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET
12631 ctl_set_ua(&io->scsiio, 0x29, 0x00);
12633 case CTL_LUN_INJ_CUSTOM:
12635 * We're assuming the user knows what he is doing.
12636 * Just copy the sense information without doing any checks.
12639 bcopy(&desc->custom_sense, &io->scsiio.sense_data,
12640 MIN(sizeof(desc->custom_sense),
12641 sizeof(io->scsiio.sense_data)));
12642 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
12643 io->scsiio.sense_len = SSD_FULL_SIZE;
12644 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
12646 case CTL_LUN_INJ_NONE:
12649 * If this is an error injection type we don't know
12650 * about, clear the continuous flag (if it is set)
12651 * so it will get deleted below.
12653 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
12657 * By default, each error injection action is a one-shot
12659 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
12662 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);
12668 #ifdef CTL_IO_DELAY
12670 ctl_datamove_timer_wakeup(void *arg)
12674 io = (union ctl_io *)arg;
12678 #endif /* CTL_IO_DELAY */
12681 ctl_datamove(union ctl_io *io)
12683 void (*fe_datamove)(union ctl_io *io);
12685 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
12687 CTL_DEBUG_PRINT(("ctl_datamove\n"));
12690 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12695 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12696 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12698 sbuf_cat(&sb, path_str);
12699 switch (io->io_hdr.io_type) {
12701 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12702 sbuf_printf(&sb, "\n");
12703 sbuf_cat(&sb, path_str);
12704 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12705 io->scsiio.tag_num, io->scsiio.tag_type);
12708 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
12709 "Tag Type: %d\n", io->taskio.task_action,
12710 io->taskio.tag_num, io->taskio.tag_type);
12713 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12714 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
12717 sbuf_cat(&sb, path_str);
12718 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
12719 (intmax_t)time_uptime - io->io_hdr.start_time);
12721 printf("%s", sbuf_data(&sb));
12723 #endif /* CTL_TIME_IO */
12725 #ifdef CTL_IO_DELAY
12726 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
12727 struct ctl_lun *lun;
12729 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12731 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
12733 struct ctl_lun *lun;
12735 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
12737 && (lun->delay_info.datamove_delay > 0)) {
12738 struct callout *callout;
12740 callout = (struct callout *)&io->io_hdr.timer_bytes;
12741 callout_init(callout, /*mpsafe*/ 1);
12742 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
12743 callout_reset(callout,
12744 lun->delay_info.datamove_delay * hz,
12745 ctl_datamove_timer_wakeup, io);
12746 if (lun->delay_info.datamove_type ==
12747 CTL_DELAY_TYPE_ONESHOT)
12748 lun->delay_info.datamove_delay = 0;
12755 * This command has been aborted. Set the port status, so we fail the data transfer.
12758 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12759 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n",
12760 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id,
12761 io->io_hdr.nexus.targ_port,
12762 (uintmax_t)io->io_hdr.nexus.targ_target.id,
12763 io->io_hdr.nexus.targ_lun);
12764 io->io_hdr.port_status = 31337;
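		/*
		 * The exact value is arbitrary; any nonzero port_status
		 * tells the move-done callback below that the transfer
		 * failed.
		 */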
12766 * Note that the backend, in this case, will get the
12767 * callback in its context. In other cases it may get
12768 * called in the frontend's interrupt thread context.
12770 io->scsiio.be_move_done(io);
12774 /* Don't confuse the frontend with a zero-length data move. */
12775 if (io->scsiio.kern_data_len == 0) {
12776 io->scsiio.be_move_done(io);
12781 * If we're in XFER mode and this I/O is from the other shelf
12782 * controller, we need to send the DMA to the other side to
12783 * actually transfer the data to/from the host. In serialize only
12784 * mode the transfer happens below CTL and ctl_datamove() is only
12785 * called on the machine that originally received the I/O.
12787 if ((control_softc->ha_mode == CTL_HA_MODE_XFER)
12788 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12789 union ctl_ha_msg msg;
12790 uint32_t sg_entries_sent;
12794 memset(&msg, 0, sizeof(msg));
12795 msg.hdr.msg_type = CTL_MSG_DATAMOVE;
12796 msg.hdr.original_sc = io->io_hdr.original_sc;
12797 msg.hdr.serializing_sc = io;
12798 msg.hdr.nexus = io->io_hdr.nexus;
12799 msg.dt.flags = io->io_hdr.flags;
12801 * We convert everything into a S/G list here. We can't
12802 * pass by reference, only by value between controllers.
12803 * So we can't pass a pointer to the S/G list, only as many
12804 * S/G entries as we can fit in here. If it's possible for
12805 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
12806 * then we need to break this up into multiple transfers.
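		 *
		 * Sketch of the chunking (the per-message capacity is
		 * sizeof-derived, so the numbers here are only
		 * illustrative): with room for 16 entries per message
		 * and kern_sg_entries = 40, three messages go out with
		 * sg_sequence 0, 1, 2, and sg_last is set only on the
		 * last one so the peer knows when to reassemble.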
12808 if (io->scsiio.kern_sg_entries == 0) {
12809 msg.dt.kern_sg_entries = 1;
12811 * If this is in cached memory, flush the cache
12812 * before we send the DMA request to the other
12813 * controller. We want to do this in either the
12814 * read or the write case. The read case is
12815 * straightforward. In the write case, we want to
12816 * make sure nothing is in the local cache that
12817 * could overwrite the DMAed data.
12819 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
12821 * XXX KDM use bus_dmamap_sync() here.
12826 * Convert to a physical address if this is a
12829 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
12830 msg.dt.sg_list[0].addr =
12831 io->scsiio.kern_data_ptr;
12834 * XXX KDM use busdma here!
12837 msg.dt.sg_list[0].addr = (void *)
12838 vtophys(io->scsiio.kern_data_ptr);
12842 msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
12845 struct ctl_sg_entry *sgl;
12848 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
12849 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
12850 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
12852 * XXX KDM use bus_dmamap_sync() here.
12857 msg.dt.kern_data_len = io->scsiio.kern_data_len;
12858 msg.dt.kern_total_len = io->scsiio.kern_total_len;
12859 msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
12860 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
12861 msg.dt.sg_sequence = 0;
12864 * Loop until we've sent all of the S/G entries. On the
12865 * other end, we'll recompose these S/G entries into one
12866 * contiguous list before passing it to the
12868 for (sg_entries_sent = 0; sg_entries_sent <
12869 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
12870 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/
12871 sizeof(msg.dt.sg_list[0])),
12872 msg.dt.kern_sg_entries - sg_entries_sent);
12874 if (do_sg_copy != 0) {
12875 struct ctl_sg_entry *sgl;
12878 sgl = (struct ctl_sg_entry *)
12879 io->scsiio.kern_data_ptr;
12881 * If this is in cached memory, flush the cache
12882 * before we send the DMA request to the other
12883 * controller. We want to do this in either
12884 * the read or the write case. The read
12885 * case is straightforward. In the write
12886 * case, we want to make sure nothing is
12887 * in the local cache that could overwrite
12891 for (i = sg_entries_sent, j = 0;
12892 i < sg_entries_sent + msg.dt.cur_sg_entries; i++, j++) {
12893 if ((io->io_hdr.flags &
12894 CTL_FLAG_NO_DATASYNC) == 0) {
12896 * XXX KDM use bus_dmamap_sync()
12899 if ((io->io_hdr.flags &
12900 CTL_FLAG_BUS_ADDR) == 0) {
12902 * XXX KDM use busdma.
12905 msg.dt.sg_list[j].addr =(void *)
12906 vtophys(sgl[i].addr);
12909 msg.dt.sg_list[j].addr =
12912 msg.dt.sg_list[j].len = sgl[i].len;
12916 sg_entries_sent += msg.dt.cur_sg_entries;
12917 if (sg_entries_sent >= msg.dt.kern_sg_entries)
12918 msg.dt.sg_last = 1;
12920 msg.dt.sg_last = 0;
12923 * XXX KDM drop and reacquire the lock here?
12925 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12926 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
12928 * XXX do something here.
12932 msg.dt.sent_sg_entries = sg_entries_sent;
12934 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12935 if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
12936 ctl_failover_io(io, /*have_lock*/ 0);
12941 * Lookup the fe_datamove() function for this particular
12945 control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
12952 ctl_send_datamove_done(union ctl_io *io, int have_lock)
12954 union ctl_ha_msg msg;
12957 memset(&msg, 0, sizeof(msg));
12959 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
12960 msg.hdr.original_sc = io;
12961 msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
12962 msg.hdr.nexus = io->io_hdr.nexus;
12963 msg.hdr.status = io->io_hdr.status;
12964 msg.scsi.tag_num = io->scsiio.tag_num;
12965 msg.scsi.tag_type = io->scsiio.tag_type;
12966 msg.scsi.scsi_status = io->scsiio.scsi_status;
12967 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12968 sizeof(io->scsiio.sense_data));
12969 msg.scsi.sense_len = io->scsiio.sense_len;
12970 msg.scsi.sense_residual = io->scsiio.sense_residual;
12971 msg.scsi.fetd_status = io->io_hdr.port_status;
12972 msg.scsi.residual = io->scsiio.residual;
12973 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12975 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12976 ctl_failover_io(io, /*have_lock*/ have_lock);
12980 isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
12981 if (isc_status > CTL_HA_STATUS_SUCCESS) {
12982 /* XXX do something if this fails */
12988 * The DMA to the remote side is done, now we need to tell the other side
12989 * we're done so it can continue with its data movement.
12992 ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
12998 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12999 printf("%s: ISC DMA write failed with error %d", __func__,
13001 ctl_set_internal_failure(&io->scsiio,
13003 /*retry_count*/ rq->ret);
13006 ctl_dt_req_free(rq);
13009 * In this case, we had to malloc the memory locally. Free it.
13011 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
13013 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13014 free(io->io_hdr.local_sglist[i].addr, M_CTL);
13017 * The data is in local and remote memory, so now we need to send
13018 * status (good or bad) back to the other side.
13020 ctl_send_datamove_done(io, /*have_lock*/ 0);
13024 * We've moved the data from the host/controller into local memory. Now we
13025 * need to push it over to the remote controller's memory.
13028 ctl_datamove_remote_dm_write_cb(union ctl_io *io)
13034 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
13035 ctl_datamove_remote_write_cb);
13041 ctl_datamove_remote_write(union ctl_io *io)
13044 void (*fe_datamove)(union ctl_io *io);
13047 * - Get the data from the host/HBA into local memory.
13048 * - DMA memory from the local controller to the remote controller.
13049 * - Send status back to the remote controller.
13052 retval = ctl_datamove_remote_sgl_setup(io);
13056 /* Switch the pointer over so the FETD knows what to do */
13057 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
13060 * Use a custom move done callback, since we need to send completion
13061 * back to the other controller, not to the backend on this side.
13063 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
13065 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
13074 ctl_datamove_remote_dm_read_cb(union ctl_io *io)
13083 * In this case, we had to malloc the memory locally. Free it.
13085 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
13087 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13088 free(io->io_hdr.local_sglist[i].addr, M_CTL);
13092 ctl_scsi_path_string(io, path_str, sizeof(path_str));
13093 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
13094 sbuf_cat(&sb, path_str);
13095 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
13096 sbuf_printf(&sb, "\n");
13097 sbuf_cat(&sb, path_str);
13098 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
13099 io->scsiio.tag_num, io->scsiio.tag_type);
13100 sbuf_cat(&sb, path_str);
13101 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
13102 io->io_hdr.flags, io->io_hdr.status);
13104 printf("%s", sbuf_data(&sb));
13109 * The read is done, now we need to send status (good or bad) back
13110 * to the other side.
13112 ctl_send_datamove_done(io, /*have_lock*/ 0);
13118 ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
13121 void (*fe_datamove)(union ctl_io *io);
13125 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
13126 printf("%s: ISC DMA read failed with error %d", __func__,
13128 ctl_set_internal_failure(&io->scsiio,
13130 /*retry_count*/ rq->ret);
13133 ctl_dt_req_free(rq);
13135 /* Switch the pointer over so the FETD knows what to do */
13136 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
13139 * Use a custom move done callback, since we need to send completion
13140 * back to the other controller, not to the backend on this side.
13142 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
13144 /* XXX KDM add checks like the ones in ctl_datamove? */
13146 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
13152 ctl_datamove_remote_sgl_setup(union ctl_io *io)
13154 struct ctl_sg_entry *local_sglist, *remote_sglist;
13155 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist;
13156 struct ctl_softc *softc;
13161 softc = control_softc;
13163 local_sglist = io->io_hdr.local_sglist;
13164 local_dma_sglist = io->io_hdr.local_dma_sglist;
13165 remote_sglist = io->io_hdr.remote_sglist;
13166 remote_dma_sglist = io->io_hdr.remote_dma_sglist;
13168 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) {
13169 for (i = 0; i < io->scsiio.kern_sg_entries; i++) {
13170 local_sglist[i].len = remote_sglist[i].len;
13173 * XXX Detect the situation where the RS-level I/O
13174 * redirector on the other side has already read the
13175 * data off of the AOR RS on this side, and
13176 * transferred it to remote (mirror) memory on the
13177 * other side. Since we already have the data in
13178 * memory here, we just need to use it.
13180 * XXX KDM this can probably be removed once we
13181 * get the cache device code in and take the
13182 * current AOR implementation out.
13185 if ((remote_sglist[i].addr >=
13186 (void *)vtophys(softc->mirr->addr))
13187 && (remote_sglist[i].addr <
13188 ((void *)vtophys(softc->mirr->addr) +
13189 CacheMirrorOffset))) {
13190 local_sglist[i].addr = remote_sglist[i].addr -
13192 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13194 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE;
13196 local_sglist[i].addr = remote_sglist[i].addr +
13201 printf("%s: local %p, remote %p, len %d\n",
13202 __func__, local_sglist[i].addr,
13203 remote_sglist[i].addr, local_sglist[i].len);
13207 uint32_t len_to_go;
13210 * In this case, we don't have automatically allocated
13211 * memory for this I/O on this controller. This typically
13212 * happens with internal CTL I/O -- e.g. inquiry, mode
13213 * sense, etc. Anything coming from RAIDCore will have
13214 * a mirror area available.
13216 len_to_go = io->scsiio.kern_data_len;
13219 * Clear the no datasync flag; we have to use malloced buffers.
13222 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC;
13225 * The difficult thing here is that the size of the various
13226 * S/G segments may be different than the size from the
13227 * remote controller. That'll make it harder when DMAing
13228 * the data back to the other side.
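		 *
		 * Each locally allocated segment below is capped at
		 * 128KB (131072 bytes), and its DMA length is rounded
		 * up to an 8-byte multiple with CTL_SIZE_8B to satisfy
		 * the HA hardware's alignment requirement.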
13230 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
13231 sizeof(io->io_hdr.remote_sglist[0])) &&
13232 (len_to_go > 0); i++) {
13233 local_sglist[i].len = MIN(len_to_go, 131072);
13234 CTL_SIZE_8B(local_dma_sglist[i].len,
13235 local_sglist[i].len);
13236 local_sglist[i].addr =
13237 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK);
13239 local_dma_sglist[i].addr = local_sglist[i].addr;
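			/*
			 * Note: with M_WAITOK, malloc(9) sleeps until
			 * memory is available rather than returning
			 * NULL, so the failure path below is purely
			 * defensive.
			 */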
13241 if (local_sglist[i].addr == NULL) {
13244 printf("malloc failed for %zd bytes!",
13245 local_dma_sglist[i].len);
13246 for (j = 0; j < i; j++) {
13247 free(local_sglist[j].addr, M_CTL);
13249 ctl_set_internal_failure(&io->scsiio,
13251 /*retry_count*/ 4857);
13253 goto bailout_error;
13256 /* XXX KDM do we need a sync here? */
13258 len_to_go -= local_sglist[i].len;
13261 * Reset the number of S/G entries accordingly. The
13262 * original number of S/G entries is available in
13265 io->scsiio.kern_sg_entries = i;
13268 printf("%s: kern_sg_entries = %d\n", __func__,
13269 io->scsiio.kern_sg_entries);
13270 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13271 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
13272 local_sglist[i].addr, local_sglist[i].len,
13273 local_dma_sglist[i].len);
13282 ctl_send_datamove_done(io, /*have_lock*/ 0);
13288 ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
13289 ctl_ha_dt_cb callback)
13291 struct ctl_ha_dt_req *rq;
13292 struct ctl_sg_entry *remote_sglist, *local_sglist;
13293 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
13294 uint32_t local_used, remote_used, total_used;
13300 rq = ctl_dt_req_alloc();
13303 * If we failed to allocate the request, and if the DMA didn't fail
13304 * anyway, set busy status. This is just a resource allocation
13308 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
13309 ctl_set_busy(&io->scsiio);
13311 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
13314 ctl_dt_req_free(rq);
13317 * The data move failed. We need to return status back
13318 * to the other controller. No point in trying to DMA
13319 * data to the remote controller.
13322 ctl_send_datamove_done(io, /*have_lock*/ 0);
13329 local_sglist = io->io_hdr.local_sglist;
13330 local_dma_sglist = io->io_hdr.local_dma_sglist;
13331 remote_sglist = io->io_hdr.remote_sglist;
13332 remote_dma_sglist = io->io_hdr.remote_dma_sglist;
13337 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
13338 rq->ret = CTL_HA_STATUS_SUCCESS;
13345 * Pull/push the data over the wire from/to the other controller.
13346 * This takes into account the possibility that the local and
13347 * remote sglists may not be identical in terms of the size of
13348 * the elements and the number of elements.
13350 * One fundamental assumption here is that the length allocated for
13351 * both the local and remote sglists is identical. Otherwise, we've
13352 * essentially got a coding error of some sort.
13354 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
13356 uint32_t cur_len, dma_length;
13359 rq->id = CTL_HA_DATA_CTL;
13360 rq->command = command;
13364 * Both pointers should be aligned. But it is possible
13365 * that the allocation length is not. They should both
13366 * also have enough slack left over at the end, though,
13367 * to round up to the next 8 byte boundary.
13369 cur_len = MIN(local_sglist[i].len - local_used,
13370 remote_sglist[j].len - remote_used);
13373 * In this case, we have a size issue and need to decrease
13374 * the size, except in the case where we actually have less
13375 * than 8 bytes left. In that case, we need to increase
13376 * the DMA length to get the last bit.
13378 if ((cur_len & 0x7) != 0) {
13379 if (cur_len > 0x7) {
13380 cur_len = cur_len - (cur_len & 0x7);
13381 dma_length = cur_len;
13383 CTL_SIZE_8B(dma_length, cur_len);
13387 dma_length = cur_len;
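		/*
		 * Worked example: cur_len = 13 sends 8 bytes now and
		 * leaves 5 for the next pass; a final cur_len = 5 is
		 * instead rounded up to dma_length = 8 by CTL_SIZE_8B
		 * so the tail still moves as one aligned DMA (only 5
		 * bytes of it are meaningful).
		 */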
13390 * If we had to allocate memory for this I/O, instead of using
13391 * the non-cached mirror memory, we'll need to flush the cache
13392 * before trying to DMA to the other controller.
13394 * We could end up doing this multiple times for the same
13395 * segment if we have a larger local segment than remote
13396 * segment. That shouldn't be an issue.
13398 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
13400 * XXX KDM use bus_dmamap_sync() here.
13404 rq->size = dma_length;
13406 tmp_ptr = (uint8_t *)local_sglist[i].addr;
13407 tmp_ptr += local_used;
13409 /* Use physical addresses when talking to ISC hardware */
13410 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
13411 /* XXX KDM use busdma */
13413 rq->local = vtophys(tmp_ptr);
13416 rq->local = tmp_ptr;
13418 tmp_ptr = (uint8_t *)remote_sglist[j].addr;
13419 tmp_ptr += remote_used;
13420 rq->remote = tmp_ptr;
13422 rq->callback = NULL;
13424 local_used += cur_len;
13425 if (local_used >= local_sglist[i].len) {
13430 remote_used += cur_len;
13431 if (remote_used >= remote_sglist[j].len) {
13435 total_used += cur_len;
13437 if (total_used >= io->scsiio.kern_data_len)
13438 rq->callback = callback;
13440 if ((rq->size & 0x7) != 0) {
13441 printf("%s: warning: size %d is not on 8b boundary\n",
13442 __func__, rq->size);
13444 if (((uintptr_t)rq->local & 0x7) != 0) {
13445 printf("%s: warning: local %p not on 8b boundary\n",
13446 __func__, rq->local);
13448 if (((uintptr_t)rq->remote & 0x7) != 0) {
13449 printf("%s: warning: remote %p not on 8b boundary\n",
13450 __func__, rq->remote);
13453 printf("%s: %s: local %#x remote %#x size %d\n", __func__,
13454 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ",
13455 rq->local, rq->remote, rq->size);
13458 isc_ret = ctl_dt_single(rq);
13459 if (isc_ret == CTL_HA_STATUS_WAIT)
13462 if (isc_ret == CTL_HA_STATUS_DISCONNECT) {
13463 rq->ret = CTL_HA_STATUS_SUCCESS;
13477 ctl_datamove_remote_read(union ctl_io *io)
13483 * This will send an error to the other controller in the case of a failure.
13486 retval = ctl_datamove_remote_sgl_setup(io);
13490 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
13491 ctl_datamove_remote_read_cb);
13493 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) {
13495 * Make sure we free memory if there was an error. The
13496 * ctl_datamove_remote_xfer() function will send the
13497 * datamove done message, or call the callback with an
13498 * error if there is a problem.
13500 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
13501 free(io->io_hdr.local_sglist[i].addr, M_CTL);
13508 * Process a datamove request from the other controller. This is used for
13509 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory
13510 * first. Once that is complete, the data gets DMAed into the remote
13511 * controller's memory. For reads, we DMA from the remote controller's
13512 * memory into our memory first, and then move it out to the FETD.
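/*
 * Concrete example of the write path described above: a WRITE arriving
 * at this controller's FETD is first DMAed into locally allocated
 * buffers, then ctl_datamove_remote_xfer() pushes each 8-byte-aligned
 * segment to the other controller via ctl_dt_single(), with the
 * callback set on the final segment reporting the datamove complete.
 * Reads run the same pipeline in the opposite direction.
 */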
13515 ctl_datamove_remote(union ctl_io *io)
13517 struct ctl_softc *softc;
13519 softc = control_softc;
13521 mtx_assert(&softc->ctl_lock, MA_NOTOWNED);
13524 * Note that we look for an aborted I/O here, but don't do some of
13525 * the other checks that ctl_datamove() normally does.
13526 * We don't need to run the datamove delay code, since that should
13527 * have been done if need be on the other controller.
13529 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
13530 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
13531 io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
13532 io->io_hdr.nexus.targ_port,
13533 io->io_hdr.nexus.targ_target.id,
13534 io->io_hdr.nexus.targ_lun);
13535 io->io_hdr.port_status = 31338;
13536 ctl_send_datamove_done(io, /*have_lock*/ 0);
13540 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
13541 ctl_datamove_remote_write(io);
13542 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
13543 ctl_datamove_remote_read(io);
13545 union ctl_ha_msg msg;
13546 struct scsi_sense_data *sense;
13550 memset(&msg, 0, sizeof(msg));
13552 msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
13553 msg.hdr.status = CTL_SCSI_ERROR;
13554 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
13556 retry_count = 4243;
13558 sense = &msg.scsi.sense_data;
13559 sks[0] = SSD_SCS_VALID;
13560 sks[1] = (retry_count >> 8) & 0xff;
13561 sks[2] = retry_count & 0xff;
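/*
 * Worked example of the encoding above: retry_count 4243 is 0x1093,
 * so sks[1] = 0x10 and sks[2] = 0x93; the initiator reassembles the
 * count as (sks[1] << 8) | sks[2].
 */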
13563 /* "Internal target failure" */
13564 scsi_set_sense_data(sense,
13565 /*sense_format*/ SSD_TYPE_NONE,
13566 /*current_error*/ 1,
13567 /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
13570 /*type*/ SSD_ELEM_SKS,
13571 /*size*/ sizeof(sks),
13575 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
13576 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
13577 ctl_failover_io(io, /*have_lock*/ 1);
13581 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
13582 CTL_HA_STATUS_SUCCESS) {
13583 /* XXX KDM what to do if this fails? */
13591 ctl_process_done(union ctl_io *io)
13593 struct ctl_lun *lun;
13594 struct ctl_softc *softc = control_softc;
13595 void (*fe_done)(union ctl_io *io);
13596 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);
13598 CTL_DEBUG_PRINT(("ctl_process_done\n"));
13600 fe_done = softc->ctl_ports[targ_port]->fe_done;
13603 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
13608 ctl_scsi_path_string(io, path_str, sizeof(path_str));
13609 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
13611 sbuf_cat(&sb, path_str);
13612 switch (io->io_hdr.io_type) {
13614 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
13615 sbuf_printf(&sb, "\n");
13616 sbuf_cat(&sb, path_str);
13617 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
13618 io->scsiio.tag_num, io->scsiio.tag_type);
13621 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
13622 "Tag Type: %d\n", io->taskio.task_action,
13623 io->taskio.tag_num, io->taskio.tag_type);
13626 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
13627 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
13630 sbuf_cat(&sb, path_str);
13631 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
13632 (intmax_t)time_uptime - io->io_hdr.start_time);
13634 printf("%s", sbuf_data(&sb));
13636 #endif /* CTL_TIME_IO */
13638 switch (io->io_hdr.io_type) {
13642 if (ctl_debug & CTL_DEBUG_INFO)
13643 ctl_io_error_print(io, NULL);
13644 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
13648 return (CTL_RETVAL_COMPLETE);
13650 panic("ctl_process_done: invalid io type %d\n",
13651 io->io_hdr.io_type);
13652 break; /* NOTREACHED */
13655 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13657 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
13658 io->io_hdr.nexus.targ_mapped_lun));
13662 mtx_lock(&lun->lun_lock);
13665 * Check to see if we have any errors to inject here. We only
13666 * inject errors for commands that don't already have errors set.
13668 if ((STAILQ_FIRST(&lun->error_list) != NULL) &&
13669 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
13670 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
13671 ctl_inject_error(lun, io);
13674 * XXX KDM how do we treat commands that aren't completed
13677 * XXX KDM should we also track I/O latency?
13679 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
13680 io->io_hdr.io_type == CTL_IO_SCSI) {
13682 struct bintime cur_bt;
13686 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13688 type = CTL_STATS_READ;
13689 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
13691 type = CTL_STATS_WRITE;
13693 type = CTL_STATS_NO_IO;
13695 lun->stats.ports[targ_port].bytes[type] +=
13696 io->scsiio.kern_total_len;
13697 lun->stats.ports[targ_port].operations[type]++;
13699 bintime_add(&lun->stats.ports[targ_port].dma_time[type],
13700 &io->io_hdr.dma_bt);
13701 lun->stats.ports[targ_port].num_dmas[type] +=
13702 io->io_hdr.num_dmas;
13703 getbintime(&cur_bt);
13704 bintime_sub(&cur_bt, &io->io_hdr.start_bt);
13705 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
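/*
 * A minimal sketch (not compiled) of the bintime accounting pattern
 * used above; "total" stands in for one of the per-port accumulators:
 */
#if 0
{
	struct bintime start, now;
	struct bintime total = { 0, 0 };

	getbintime(&start);		/* when the I/O arrived */
	/* ... the I/O runs ... */
	getbintime(&now);
	bintime_sub(&now, &start);	/* now = elapsed time */
	bintime_add(&total, &now);	/* fold into the accumulator */
}
#endif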
13710 * Remove this from the OOA queue.
13712 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
13714 if (TAILQ_EMPTY(&lun->ooa_queue))
13715 lun->last_busy = getsbinuptime();
13719 * Run through the blocked queue on this LUN and see if anything
13720 * has become unblocked, now that this transaction is done.
13722 ctl_check_blocked(lun);
13725 * If the LUN has been invalidated, free it if there is nothing
13726 * left on its OOA queue.
13728 if ((lun->flags & CTL_LUN_INVALID)
13729 && TAILQ_EMPTY(&lun->ooa_queue)) {
13730 mtx_unlock(&lun->lun_lock);
13731 mtx_lock(&softc->ctl_lock);
13733 mtx_unlock(&softc->ctl_lock);
13735 mtx_unlock(&lun->lun_lock);
13740 * If this command has been aborted, make sure we set the status
13741 * properly. The FETD is responsible for freeing the I/O and doing
13742 * whatever it needs to do to clean up its state.
13744 if (io->io_hdr.flags & CTL_FLAG_ABORT)
13745 ctl_set_task_aborted(&io->scsiio);
13748 * If enabled, print command error status.
13750 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
13751 (ctl_debug & CTL_DEBUG_INFO) != 0)
13752 ctl_io_error_print(io, NULL);
13755 * Tell the FETD or the other shelf controller we're done with this
13756 * command. Note that only SCSI commands get to this point. Task
13757 * management commands are completed above.
13759 * We only send status to the other controller if we're in XFER
13760 * mode. In SER_ONLY mode, the I/O is done on the controller that
13761 * received the I/O (from CTL's perspective), and so the status is
13764 * XXX KDM if we hold the lock here, we could cause a deadlock
13765 * if the frontend re-enters in this context to queue
13768 if ((softc->ha_mode == CTL_HA_MODE_XFER)
13769 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
13770 union ctl_ha_msg msg;
13772 memset(&msg, 0, sizeof(msg));
13773 msg.hdr.msg_type = CTL_MSG_FINISH_IO;
13774 msg.hdr.original_sc = io->io_hdr.original_sc;
13775 msg.hdr.nexus = io->io_hdr.nexus;
13776 msg.hdr.status = io->io_hdr.status;
13777 msg.scsi.scsi_status = io->scsiio.scsi_status;
13778 msg.scsi.tag_num = io->scsiio.tag_num;
13779 msg.scsi.tag_type = io->scsiio.tag_type;
13780 msg.scsi.sense_len = io->scsiio.sense_len;
13781 msg.scsi.sense_residual = io->scsiio.sense_residual;
13782 msg.scsi.residual = io->scsiio.residual;
13783 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
13784 sizeof(io->scsiio.sense_data));
13786 * We copy this whether or not this is an I/O-related
13787 * command. Otherwise, we'd have to go and check to see
13788 * whether it's a read/write command, and it really isn't worth it.
13791 memcpy(&msg.scsi.lbalen,
13792 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
13793 sizeof(msg.scsi.lbalen));
13795 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
13796 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
13797 /* XXX do something here */
13804 return (CTL_RETVAL_COMPLETE);
13809 * Front end should call this if it doesn't do autosense. When the request
13810 * sense comes back in from the initiator, we'll dequeue this and send it.
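/*
 * A minimal usage sketch under the assumption of a frontend that has
 * just received sense data from its initiator; "fe_io" and "sense_buf"
 * are illustrative names, not part of this driver:
 */
#if 0
union ctl_io *fe_io;			/* previously allocated, nexus filled in */
struct scsi_sense_data sense_buf;	/* as received from the initiator */

memcpy(&fe_io->scsiio.sense_data, &sense_buf,
    MIN(sizeof(fe_io->scsiio.sense_data), sizeof(sense_buf)));
ctl_queue_sense(fe_io);
#endif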
13813 ctl_queue_sense(union ctl_io *io)
13815 struct ctl_lun *lun;
13816 struct ctl_port *port;
13817 struct ctl_softc *softc;
13818 uint32_t initidx, targ_lun;
13820 softc = control_softc;
13822 CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
13825 * LUN lookup will likely move to the ctl_work_thread() once we
13826 * have our new queueing infrastructure (that doesn't put things on
13827 * a per-LUN queue initially). That is so that we can handle
13828 * things like an INQUIRY to a LUN that we don't have enabled. We
13829 * can't deal with that right now.
13831 mtx_lock(&softc->ctl_lock);
13834 * If we don't have a LUN for this, just toss the sense information.
13837 port = ctl_io_port(&io->io_hdr);
13838 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13839 if ((targ_lun < CTL_MAX_LUNS)
13840 && (softc->ctl_luns[targ_lun] != NULL))
13841 lun = softc->ctl_luns[targ_lun];
13845 initidx = ctl_get_initindex(&io->io_hdr.nexus);
13847 mtx_lock(&lun->lun_lock);
13849 * Already have CA set for this LUN...toss the sense information.
13851 if (ctl_is_set(lun->have_ca, initidx)) {
13852 mtx_unlock(&lun->lun_lock);
13856 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
13857 MIN(sizeof(lun->pending_sense[initidx]),
13858 sizeof(io->scsiio.sense_data)));
13859 ctl_set_mask(lun->have_ca, initidx);
13860 mtx_unlock(&lun->lun_lock);
13863 mtx_unlock(&softc->ctl_lock);
13867 return (CTL_RETVAL_COMPLETE);
13872 * Primary command inlet from frontend ports. All SCSI and task I/O
13873 * requests must go through this function.
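/*
 * A minimal usage sketch for a frontend port; the "fe_"-prefixed names
 * are illustrative, and a real FETD fills in the complete nexus and
 * CDB before queueing:
 */
#if 0
union ctl_io *fe_io;		/* from the port's I/O pool */

fe_io->io_hdr.io_type = CTL_IO_SCSI;
fe_io->io_hdr.nexus.targ_port = fe_port_num;
fe_io->io_hdr.nexus.targ_lun = fe_lun;
fe_io->scsiio.cdb_len = fe_cdb_len;
memcpy(fe_io->scsiio.cdb, fe_cdb, fe_cdb_len);
ctl_queue(fe_io);
#endif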
13876 ctl_queue(union ctl_io *io)
13878 struct ctl_port *port;
13880 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
13883 io->io_hdr.start_time = time_uptime;
13884 getbintime(&io->io_hdr.start_bt);
13885 #endif /* CTL_TIME_IO */
13887 /* Map FE-specific LUN ID into global one. */
13888 port = ctl_io_port(&io->io_hdr);
13889 io->io_hdr.nexus.targ_mapped_lun =
13890 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13892 switch (io->io_hdr.io_type) {
13895 if (ctl_debug & CTL_DEBUG_CDB)
13897 ctl_enqueue_incoming(io);
13900 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
13904 return (CTL_RETVAL_COMPLETE);
13907 #ifdef CTL_IO_DELAY
13909 ctl_done_timer_wakeup(void *arg)
13913 io = (union ctl_io *)arg;
13916 #endif /* CTL_IO_DELAY */
13919 ctl_done(union ctl_io *io)
13923 * Enable this to catch duplicate completion issues.
13926 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
13927 printf("%s: type %d msg %d cdb %x iptl: "
13928 "%d:%d:%d:%d tag 0x%04x "
13929 "flag %#x status %x\n",
13931 io->io_hdr.io_type,
13932 io->io_hdr.msg_type,
13934 io->io_hdr.nexus.initid.id,
13935 io->io_hdr.nexus.targ_port,
13936 io->io_hdr.nexus.targ_target.id,
13937 io->io_hdr.nexus.targ_lun,
13938 (io->io_hdr.io_type ==
13940 io->taskio.tag_num :
13941 io->scsiio.tag_num,
13943 io->io_hdr.status);
13945 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
13949 * This is an internal copy of an I/O, and should not go through
13950 * the normal done processing logic.
13952 if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
13956 * We need to send a msg to the serializing shelf to finish the IO
13957 * as well. We don't send a finish message to the other shelf if
13958 * this is a task management command. Task management commands
13959 * aren't serialized in the OOA queue, but rather just executed on
13960 * both shelf controllers for commands that originated on that controller.
13963 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
13964 && (io->io_hdr.io_type != CTL_IO_TASK)) {
13965 union ctl_ha_msg msg_io;
13967 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
13968 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
13969 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
13970 sizeof(msg_io), 0) != CTL_HA_STATUS_SUCCESS) {
13972 /* continue on to finish IO */
13974 #ifdef CTL_IO_DELAY
13975 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
13976 struct ctl_lun *lun;
13978 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13980 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
13982 struct ctl_lun *lun;
13984 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
13987 && (lun->delay_info.done_delay > 0)) {
13988 struct callout *callout;
13990 callout = (struct callout *)&io->io_hdr.timer_bytes;
13991 callout_init(callout, /*mpsafe*/ 1);
13992 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
13993 callout_reset(callout,
13994 lun->delay_info.done_delay * hz,
13995 ctl_done_timer_wakeup, io);
13996 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
13997 lun->delay_info.done_delay = 0;
14001 #endif /* CTL_IO_DELAY */
14003 ctl_enqueue_done(io);
14007 ctl_isc(struct ctl_scsiio *ctsio)
14009 struct ctl_lun *lun;
14012 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
14014 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));
14016 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));
14018 retval = lun->backend->data_submit((union ctl_io *)ctsio);
14025 ctl_work_thread(void *arg)
14027 struct ctl_thread *thr = (struct ctl_thread *)arg;
14028 struct ctl_softc *softc = thr->ctl_softc;
14032 CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));
14038 * We handle the queues in this order:
14040 * - done queue (to free up resources, unblock other commands)
14044 * If those queues are empty, we break out of the loop and go to sleep.
14047 mtx_lock(&thr->queue_lock);
14048 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
14050 STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
14051 mtx_unlock(&thr->queue_lock);
14052 ctl_handle_isc(io);
14055 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
14057 STAILQ_REMOVE_HEAD(&thr->done_queue, links);
14058 /* clear any blocked commands, call fe_done */
14059 mtx_unlock(&thr->queue_lock);
14060 retval = ctl_process_done(io);
14063 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
14065 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
14066 mtx_unlock(&thr->queue_lock);
14067 if (io->io_hdr.io_type == CTL_IO_TASK)
14070 ctl_scsiio_precheck(softc, &io->scsiio);
14073 if (!ctl_pause_rtr) {
14074 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
14076 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
14077 mtx_unlock(&thr->queue_lock);
14078 retval = ctl_scsiio(&io->scsiio);
14079 if (retval != CTL_RETVAL_COMPLETE)
14080 CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
14085 /* Sleep until we have something to do. */
14086 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
14091 ctl_lun_thread(void *arg)
14093 struct ctl_softc *softc = (struct ctl_softc *)arg;
14094 struct ctl_be_lun *be_lun;
14097 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));
14101 mtx_lock(&softc->ctl_lock);
14102 be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
14103 if (be_lun != NULL) {
14104 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
14105 mtx_unlock(&softc->ctl_lock);
14106 ctl_create_lun(be_lun);
14110 /* Sleep until we have something to do. */
14111 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
14112 PDROP | PRIBIO, "-", 0);
14117 ctl_thresh_thread(void *arg)
14119 struct ctl_softc *softc = (struct ctl_softc *)arg;
14120 struct ctl_lun *lun;
14121 struct ctl_be_lun *be_lun;
14122 struct scsi_da_rw_recovery_page *rwpage;
14123 struct ctl_logical_block_provisioning_page *page;
14125 uint64_t thres, val;
14128 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
14131 mtx_lock(&softc->ctl_lock);
14132 STAILQ_FOREACH(lun, &softc->lun_list, links) {
14133 be_lun = lun->be_lun;
14134 if ((lun->flags & CTL_LUN_DISABLED) ||
14135 (lun->flags & CTL_LUN_OFFLINE) ||
14136 lun->backend->lun_attr == NULL)
14138 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
14139 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
14142 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT];
14143 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
14144 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
14146 thres = scsi_4btoul(page->descr[i].count);
14147 thres <<= CTL_LBP_EXPONENT;
14148 switch (page->descr[i].resource) {
14150 attr = "blocksavail";
14153 attr = "blocksused";
14156 attr = "poolblocksavail";
14159 attr = "poolblocksused";
14164 mtx_unlock(&softc->ctl_lock); // XXX
14165 val = lun->backend->lun_attr(
14166 lun->be_lun->be_lun, attr);
14167 mtx_lock(&softc->ctl_lock);
14168 if (val == UINT64_MAX)
14170 if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
14171 == SLBPPD_ARMING_INC)
14172 e |= (val >= thres);
14174 e |= (val <= thres);
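/*
 * Example of the arming logic above: a descriptor armed with
 * SLBPPD_ARMING_INC on "blocksused" fires once usage grows to or past
 * the threshold, while a descriptor armed the other way on
 * "blocksavail" fires as free blocks fall to or below it.
 */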
14176 mtx_lock(&lun->lun_lock);
14178 if (lun->lasttpt == 0 ||
14179 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
14180 lun->lasttpt = time_uptime;
14181 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
14185 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
14187 mtx_unlock(&lun->lun_lock);
14189 mtx_unlock(&softc->ctl_lock);
14190 pause("-", CTL_LBP_PERIOD * hz);
14195 ctl_enqueue_incoming(union ctl_io *io)
14197 struct ctl_softc *softc = control_softc;
14198 struct ctl_thread *thr;
14201 idx = (io->io_hdr.nexus.targ_port * 127 +
14202 io->io_hdr.nexus.initid.id) % worker_threads;
14203 thr = &softc->threads[idx];
14204 mtx_lock(&thr->queue_lock);
14205 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
14206 mtx_unlock(&thr->queue_lock);
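/*
 * Worked example of the thread selection above, assuming
 * worker_threads is 4: targ_port 2 and initiator ID 5 hash to
 * (2 * 127 + 5) % 4 = 3, so every command from that port/initiator
 * pair lands on thread 3 and is processed in arrival order.
 */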
14211 ctl_enqueue_rtr(union ctl_io *io)
14213 struct ctl_softc *softc = control_softc;
14214 struct ctl_thread *thr;
14216 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
14217 mtx_lock(&thr->queue_lock);
14218 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
14219 mtx_unlock(&thr->queue_lock);
14224 ctl_enqueue_done(union ctl_io *io)
14226 struct ctl_softc *softc = control_softc;
14227 struct ctl_thread *thr;
14229 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
14230 mtx_lock(&thr->queue_lock);
14231 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
14232 mtx_unlock(&thr->queue_lock);
14237 ctl_enqueue_isc(union ctl_io *io)
14239 struct ctl_softc *softc = control_softc;
14240 struct ctl_thread *thr;
14242 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
14243 mtx_lock(&thr->queue_lock);
14244 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
14245 mtx_unlock(&thr->queue_lock);
14249 /* Initialization and failover */
14252 ctl_init_isc_msg(void)
14254 printf("CTL: Still calling this thing\n");
14259 * Initializes component into configuration defined by bootMode
14261 * returns hasc_Status:
14263 * ERROR - fatal error
14265 static ctl_ha_comp_status
14266 ctl_isc_init(struct ctl_ha_component *c)
14268 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
14275 * Starts component in state requested. If component starts successfully,
14276 * it must set its own state to the requested state
14277 * When requested state is HASC_STATE_HA, the component may refine it
14278 * by adding _SLAVE or _MASTER flags.
14279 * Currently allowed state transitions are:
14280 * UNKNOWN->HA - initial startup
14281 * UNKNOWN->SINGLE - initial startup when no partner detected
14282 * HA->SINGLE - failover
14283 * returns ctl_ha_comp_status:
14284 * OK - component successfully started in requested state
14285 * FAILED - could not start the requested state, failover may be possible
14287 * ERROR - fatal error detected, no future startup possible
14289 static ctl_ha_comp_status
14290 ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
14292 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
14294 printf("%s: go\n", __func__);
14296 // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
14297 if (c->state == CTL_HA_STATE_UNKNOWN) {
14298 control_softc->is_single = 0;
14299 if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
14300 != CTL_HA_STATUS_SUCCESS) {
14301 printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
14302 ret = CTL_HA_COMP_STATUS_ERROR;
14304 } else if (CTL_HA_STATE_IS_HA(c->state)
14305 && CTL_HA_STATE_IS_SINGLE(state)) {
14306 // HA->SINGLE transition
14308 control_softc->is_single = 1;
14310 printf("ctl_isc_start: Invalid state transition %X->%X\n",
14312 ret = CTL_HA_COMP_STATUS_ERROR;
14314 if (CTL_HA_STATE_IS_SINGLE(state))
14315 control_softc->is_single = 1;
14323 * Quiesce component
14324 * The component must clear any error conditions (set status to OK) and
14325 * prepare itself for another Start call
14326 * returns ctl_ha_comp_status:
14330 static ctl_ha_comp_status
14331 ctl_isc_quiesce(struct ctl_ha_component *c)
14333 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
14340 struct ctl_ha_component ctl_ha_component_ctlisc =
14343 .state = CTL_HA_STATE_UNKNOWN,
14344 .init = ctl_isc_init,
14345 .start = ctl_isc_start,
14346 .quiesce = ctl_isc_quiesce